
[bugfix] Fix thumbnails not taking exif rotation into account (#746)

* use disintegration/imaging instead of nfnt/resize

* update tests

* use disintegration lib for thumbing (if necessary)
tobi 2022-08-10 14:05:14 +02:00 committed by GitHub
parent 7090f0a592
commit 91c8d5d20d
47 changed files with 6984 additions and 2163 deletions


@@ -207,6 +207,7 @@ The following libraries and frameworks are used by GoToSocial, with gratitude
 - [buckket/go-blurhash](https://github.com/buckket/go-blurhash); used for generating image blurhashes. [GPL-3.0 License](https://spdx.org/licenses/GPL-3.0-only.html).
 - [coreos/go-oidc](https://github.com/coreos/go-oidc); OIDC client library. [Apache-2.0 License](https://spdx.org/licenses/Apache-2.0.html).
+- [disintegration/imaging](https://github.com/disintegration/imaging); image resizing. [MIT License](https://spdx.org/licenses/MIT.html).
 - [gin-gonic/gin](https://github.com/gin-gonic/gin); speedy router engine. [MIT License](https://spdx.org/licenses/MIT.html).
 - [gin-contrib/cors](https://github.com/gin-contrib/cors); Gin CORS middleware. [MIT License](https://spdx.org/licenses/MIT.html).
 - [gin-contrib/gzip](https://github.com/gin-contrib/gzip); Gin gzip middleware. [MIT License](https://spdx.org/licenses/MIT.html).
@@ -232,7 +233,6 @@ The following libraries and frameworks are used by GoToSocial, with gratitude
 - [modernc.org/ccgo](https://gitlab.com/cznic/ccgo); c99 AST -> Go translater. [BSD-3-Clause License](https://spdx.org/licenses/BSD-3-Clause.html).
 - [modernc.org/libc](https://gitlab.com/cznic/libc); C-runtime services. [BSD-3-Clause License](https://spdx.org/licenses/BSD-3-Clause.html).
 - [mvdan/xurls](https://github.com/mvdan/xurls); URL parsing regular expressions. [BSD-3-Clause License](https://spdx.org/licenses/BSD-3-Clause.html).
-- [nfnt/resize](https://github.com/nfnt/resize); convenient image resizing. [ISC License](https://spdx.org/licenses/ISC.html).
 - [oklog/ulid](https://github.com/oklog/ulid); sequential, database-friendly ID generation. [Apache-2.0 License](https://spdx.org/licenses/Apache-2.0.html).
 - [ReneKroon/ttlcache](https://github.com/ReneKroon/ttlcache); in-memory caching. [MIT License](https://spdx.org/licenses/MIT.html).
 - [robfig/cron](https://github.com/robfig/cron); cron job scheduling. [MIT License](https://spdx.org/licenses/MIT.html).

go.mod

@@ -16,6 +16,7 @@ require (
 	codeberg.org/gruf/go-store v1.3.8
 	github.com/buckket/go-blurhash v1.1.0
 	github.com/coreos/go-oidc/v3 v3.1.0
+	github.com/disintegration/imaging v1.6.2
 	github.com/gin-contrib/cors v1.3.1
 	github.com/gin-contrib/gzip v0.0.5
 	github.com/gin-contrib/sessions v0.0.5
@@ -31,7 +32,6 @@ require (
 	github.com/miekg/dns v1.1.49
 	github.com/minio/minio-go/v7 v7.0.29
 	github.com/mitchellh/mapstructure v1.5.0
-	github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
 	github.com/oklog/ulid v1.3.1
 	github.com/robfig/cron/v3 v3.0.1
 	github.com/russross/blackfriday/v2 v2.1.0
@@ -126,6 +126,7 @@ require (
 	github.com/ugorji/go/codec v1.2.7 // indirect
 	github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
 	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
+	golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 // indirect
 	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
 	golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32 // indirect
 	golang.org/x/tools v0.1.10 // indirect

go.sum

@@ -122,6 +122,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
+github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
 github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE=
 github.com/dsoprea/go-exif/v2 v2.0.0-20200321225314-640175a69fe4/go.mod h1:Lm2lMM2zx8p4a34ZemkaUV95AnMl4ZvLbCUbwOvLC2E=
 github.com/dsoprea/go-exif/v3 v3.0.0-20200717053412-08f1b6708903/go.mod h1:0nsO1ce0mh5czxGeLo4+OCZ/C6Eo6ZlMWsz7rH/Gxv8=
@@ -432,8 +434,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
 github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
-github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
-github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
@@ -635,6 +635,8 @@ golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d h1:vtUKgx8dahOomfFzLREU8nSv2
 golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U=
+golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=


@@ -212,7 +212,7 @@ func (suite *MediaCreateTestSuite) TestMediaCreateSuccessful() {
 			Y: 0.5,
 		},
 	}, attachmentReply.Meta)
-	suite.Equal("LjBzUo#6RQR._NvzRjWF?urqV@a$", attachmentReply.Blurhash)
+	suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachmentReply.Blurhash)
 	suite.NotEmpty(attachmentReply.ID)
 	suite.NotEmpty(attachmentReply.URL)
 	suite.NotEmpty(attachmentReply.PreviewURL)
@@ -306,7 +306,7 @@ func (suite *MediaCreateTestSuite) TestMediaCreateSuccessfulV2() {
 			Y: 0.5,
 		},
 	}, attachmentReply.Meta)
-	suite.Equal("LjBzUo#6RQR._NvzRjWF?urqV@a$", attachmentReply.Blurhash)
+	suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachmentReply.Blurhash)
 	suite.NotEmpty(attachmentReply.ID)
 	suite.Nil(attachmentReply.URL)
 	suite.NotEmpty(attachmentReply.PreviewURL)


@@ -29,7 +29,7 @@ import (
 	"io"
 	"github.com/buckket/go-blurhash"
-	"github.com/nfnt/resize"
+	"github.com/disintegration/imaging"
 )
 const (
@@ -114,33 +114,40 @@ func deriveThumbnail(r io.Reader, contentType string, createBlurhash bool) (*ima
 	var err error
 	switch contentType {
-	case mimeImageJpeg:
-		i, err = jpeg.Decode(r)
+	case mimeImageJpeg, mimeImageGif:
+		i, err = imaging.Decode(r, imaging.AutoOrientation(true))
 	case mimeImagePng:
-		i, err = StrippedPngDecode(r)
-	case mimeImageGif:
-		i, err = gif.Decode(r)
+		strippedPngReader := io.Reader(&PNGAncillaryChunkStripper{
+			Reader: r,
+		})
+		i, err = imaging.Decode(strippedPngReader, imaging.AutoOrientation(true))
 	default:
 		err = fmt.Errorf("content type %s can't be thumbnailed", contentType)
 	}
 	if err != nil {
-		return nil, fmt.Errorf("error decoding image as %s: %s", contentType, err)
+		return nil, fmt.Errorf("error decoding %s: %s", contentType, err)
 	}
-	if i == nil {
-		return nil, errors.New("processed image was nil")
+	originalX := i.Bounds().Size().X
+	originalY := i.Bounds().Size().Y
+	var thumb image.Image
+	if originalX <= thumbnailMaxWidth && originalY <= thumbnailMaxHeight {
+		// it's already small, no need to resize
+		thumb = i
+	} else {
+		thumb = imaging.Fit(i, thumbnailMaxWidth, thumbnailMaxHeight, imaging.Linear)
 	}
-	thumb := resize.Thumbnail(thumbnailMaxWidth, thumbnailMaxHeight, i, resize.NearestNeighbor)
-	width := thumb.Bounds().Size().X
-	height := thumb.Bounds().Size().Y
-	size := width * height
-	aspect := float64(width) / float64(height)
+	thumbX := thumb.Bounds().Size().X
+	thumbY := thumb.Bounds().Size().Y
+	size := thumbX * thumbY
+	aspect := float64(thumbX) / float64(thumbY)
 	im := &imageMeta{
-		width:  width,
-		height: height,
+		width:  thumbX,
+		height: thumbY,
 		size:   size,
 		aspect: aspect,
 	}
@@ -148,7 +155,7 @@ func deriveThumbnail(r io.Reader, contentType string, createBlurhash bool) (*ima
 	if createBlurhash {
 		// for generating blurhashes, it's more cost effective to lose detail rather than
 		// pass a big image into the blurhash algorithm, so make a teeny tiny version
-		tiny := resize.Thumbnail(32, 32, thumb, resize.NearestNeighbor)
+		tiny := imaging.Resize(thumb, 32, 0, imaging.NearestNeighbor)
 		bh, err := blurhash.Encode(4, 3, tiny)
 		if err != nil {
 			return nil, fmt.Errorf("error creating blurhash: %s", err)


@@ -80,7 +80,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessBlocking() {
 	suite.Equal("image/jpeg", attachment.File.ContentType)
 	suite.Equal("image/jpeg", attachment.Thumbnail.ContentType)
 	suite.Equal(269739, attachment.File.FileSize)
-	suite.Equal("LjBzUo#6RQR._NvzRjWF?urqV@a$", attachment.Blurhash)
+	suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachment.Blurhash)
 	// now make sure the attachment is in the database
 	dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachmentID)
@@ -152,7 +152,7 @@ func (suite *ManagerTestSuite) TestPngNoAlphaChannelProcessBlocking() {
 	suite.Equal("image/png", attachment.File.ContentType)
 	suite.Equal("image/jpeg", attachment.Thumbnail.ContentType)
 	suite.Equal(17471, attachment.File.FileSize)
-	suite.Equal("LFP?{^.A-?xd.9o#RVRQ~oj:_0xW", attachment.Blurhash)
+	suite.Equal("LFQT7e.A%O%4?co$M}M{_1W9~TxV", attachment.Blurhash)
 	// now make sure the attachment is in the database
 	dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachmentID)
@@ -224,7 +224,7 @@ func (suite *ManagerTestSuite) TestPngAlphaChannelProcessBlocking() {
 	suite.Equal("image/png", attachment.File.ContentType)
 	suite.Equal("image/jpeg", attachment.Thumbnail.ContentType)
 	suite.Equal(18904, attachment.File.FileSize)
-	suite.Equal("LFP?{^.A-?xd.9o#RVRQ~oj:_0xW", attachment.Blurhash)
+	suite.Equal("LFQT7e.A%O%4?co$M}M{_1W9~TxV", attachment.Blurhash)
 	// now make sure the attachment is in the database
 	dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachmentID)
@@ -307,7 +307,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessBlockingWithCallback() {
 	suite.Equal("image/jpeg", attachment.File.ContentType)
 	suite.Equal("image/jpeg", attachment.Thumbnail.ContentType)
 	suite.Equal(269739, attachment.File.FileSize)
-	suite.Equal("LjBzUo#6RQR._NvzRjWF?urqV@a$", attachment.Blurhash)
+	suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachment.Blurhash)
 	// now make sure the attachment is in the database
 	dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachmentID)
@@ -386,7 +386,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessAsync() {
 	suite.Equal("image/jpeg", attachment.File.ContentType)
 	suite.Equal("image/jpeg", attachment.Thumbnail.ContentType)
 	suite.Equal(269739, attachment.File.FileSize)
-	suite.Equal("LjBzUo#6RQR._NvzRjWF?urqV@a$", attachment.Blurhash)
+	suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachment.Blurhash)
 	// now make sure the attachment is in the database
 	dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachmentID)
@@ -467,7 +467,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegQueueSpamming() {
 	suite.Equal("image/jpeg", attachment.File.ContentType)
 	suite.Equal("image/jpeg", attachment.Thumbnail.ContentType)
 	suite.Equal(269739, attachment.File.FileSize)
-	suite.Equal("LjBzUo#6RQR._NvzRjWF?urqV@a$", attachment.Blurhash)
+	suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachment.Blurhash)
 	// now make sure the attachment is in the database
 	dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachmentID)
@@ -556,7 +556,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessBlockingWithDiskStorage() {
 	suite.Equal("image/jpeg", attachment.File.ContentType)
 	suite.Equal("image/jpeg", attachment.Thumbnail.ContentType)
 	suite.Equal(269739, attachment.File.FileSize)
-	suite.Equal("LjBzUo#6RQR._NvzRjWF?urqV@a$", attachment.Blurhash)
+	suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachment.Blurhash)
 	// now make sure the attachment is in the database
 	dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachmentID)

Binary file not shown (test fixture image changed: 24 KiB before, 22 KiB after).

vendor/github.com/disintegration/imaging/.travis.yml generated vendored Normal file

@ -0,0 +1,12 @@
language: go
go:
- "1.10.x"
- "1.11.x"
- "1.12.x"
before_install:
- go get github.com/mattn/goveralls
script:
- go test -v -race -cover
- $GOPATH/bin/goveralls -service=travis-ci

vendor/github.com/disintegration/imaging/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2012 Grigory Dryapak
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/disintegration/imaging/README.md generated vendored Normal file

@ -0,0 +1,226 @@
# Imaging
[![GoDoc](https://godoc.org/github.com/disintegration/imaging?status.svg)](https://godoc.org/github.com/disintegration/imaging)
[![Build Status](https://travis-ci.org/disintegration/imaging.svg?branch=master)](https://travis-ci.org/disintegration/imaging)
[![Coverage Status](https://coveralls.io/repos/github/disintegration/imaging/badge.svg?branch=master&service=github)](https://coveralls.io/github/disintegration/imaging?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/disintegration/imaging)](https://goreportcard.com/report/github.com/disintegration/imaging)
Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.).
All the image processing functions provided by the package accept any image type that implements `image.Image` interface
as an input, and return a new image of `*image.NRGBA` type (32bit RGBA colors, non-premultiplied alpha).
## Installation
go get -u github.com/disintegration/imaging
## Documentation
http://godoc.org/github.com/disintegration/imaging
## Usage examples
A few usage examples can be found below. See the documentation for the full list of supported functions.
### Image resizing
```go
// Resize srcImage to size = 128x128px using the Lanczos filter.
dstImage128 := imaging.Resize(srcImage, 128, 128, imaging.Lanczos)
// Resize srcImage to width = 800px preserving the aspect ratio.
dstImage800 := imaging.Resize(srcImage, 800, 0, imaging.Lanczos)
// Scale down srcImage to fit the 800x600px bounding box.
dstImageFit := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)
// Resize and crop the srcImage to fill the 100x100px area.
dstImageFill := imaging.Fill(srcImage, 100, 100, imaging.Center, imaging.Lanczos)
```
Imaging supports image resizing using various resampling filters. The most notable ones:
- `Lanczos` - A high-quality resampling filter for photographic images yielding sharp results.
- `CatmullRom` - A sharp cubic filter that is faster than Lanczos filter while providing similar results.
- `MitchellNetravali` - A cubic filter that produces smoother results with less ringing artifacts than CatmullRom.
- `Linear` - Bilinear resampling filter, produces smooth output. Faster than cubic filters.
- `Box` - Simple and fast averaging filter appropriate for downscaling. When upscaling it's similar to NearestNeighbor.
- `NearestNeighbor` - Fastest resampling filter, no antialiasing.
The full list of supported filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. Custom filters can be created using ResampleFilter struct.
**Resampling filters comparison**
Original image:
![srcImage](testdata/branches.png)
The same image resized from 600x400px to 150x100px using different resampling filters.
From faster (lower quality) to slower (higher quality):
Filter | Resize result
--------------------------|---------------------------------------------
`imaging.NearestNeighbor` | ![dstImage](testdata/out_resize_nearest.png)
`imaging.Linear` | ![dstImage](testdata/out_resize_linear.png)
`imaging.CatmullRom` | ![dstImage](testdata/out_resize_catrom.png)
`imaging.Lanczos` | ![dstImage](testdata/out_resize_lanczos.png)
### Gaussian Blur
```go
dstImage := imaging.Blur(srcImage, 0.5)
```
The sigma parameter allows you to control the strength of the blurring effect.
Original image | Sigma = 0.5 | Sigma = 1.5
-----------------------------------|----------------------------------------|---------------------------------------
![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_blur_0.5.png) | ![dstImage](testdata/out_blur_1.5.png)
### Sharpening
```go
dstImage := imaging.Sharpen(srcImage, 0.5)
```
`Sharpen` uses a gaussian function internally. The sigma parameter allows you to control the strength of the sharpening effect.
Original image | Sigma = 0.5 | Sigma = 1.5
-----------------------------------|-------------------------------------------|------------------------------------------
![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_sharpen_0.5.png) | ![dstImage](testdata/out_sharpen_1.5.png)
### Gamma correction
```go
dstImage := imaging.AdjustGamma(srcImage, 0.75)
```
Original image | Gamma = 0.75 | Gamma = 1.25
-----------------------------------|------------------------------------------|-----------------------------------------
![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_gamma_0.75.png) | ![dstImage](testdata/out_gamma_1.25.png)
### Contrast adjustment
```go
dstImage := imaging.AdjustContrast(srcImage, 20)
```
Original image | Contrast = 15 | Contrast = -15
-----------------------------------|--------------------------------------------|-------------------------------------------
![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_contrast_p15.png) | ![dstImage](testdata/out_contrast_m15.png)
### Brightness adjustment
```go
dstImage := imaging.AdjustBrightness(srcImage, 20)
```
Original image | Brightness = 10 | Brightness = -10
-----------------------------------|----------------------------------------------|---------------------------------------------
![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_brightness_p10.png) | ![dstImage](testdata/out_brightness_m10.png)
### Saturation adjustment
```go
dstImage := imaging.AdjustSaturation(srcImage, 20)
```
Original image | Saturation = 30 | Saturation = -30
-----------------------------------|----------------------------------------------|---------------------------------------------
![srcImage](testdata/flowers_small.png) | ![dstImage](testdata/out_saturation_p30.png) | ![dstImage](testdata/out_saturation_m30.png)
## FAQ
### Incorrect image orientation after processing (e.g. an image appears rotated after resizing)
Most probably, the given image contains the EXIF orientation tag.
The standard `image/*` packages do not support loading and saving
this kind of information. To fix the issue, try opening images with
the `AutoOrientation` decode option. If this option is set to `true`,
the image orientation is changed after decoding, according to the
orientation tag (if present). Here's the example:
```go
img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true))
```
### What's the difference between `imaging` and `gift` packages?
[imaging](https://github.com/disintegration/imaging)
is designed to be a lightweight and simple image manipulation package.
It provides basic image processing functions and a few helper functions
such as `Open` and `Save`. It consistently returns *image.NRGBA image
type (8 bits per channel, RGBA).
[gift](https://github.com/disintegration/gift)
supports more advanced image processing, for example, sRGB/Linear color
space conversions. It also supports different output image types
(e.g. 16 bits per channel) and provides easy-to-use API for chaining
multiple processing steps together.
## Example code
```go
package main
import (
"image"
"image/color"
"log"
"github.com/disintegration/imaging"
)
func main() {
// Open a test image.
src, err := imaging.Open("testdata/flowers.png")
if err != nil {
log.Fatalf("failed to open image: %v", err)
}
// Crop the original image to 300x300px size using the center anchor.
src = imaging.CropAnchor(src, 300, 300, imaging.Center)
// Resize the cropped image to width = 200px preserving the aspect ratio.
src = imaging.Resize(src, 200, 0, imaging.Lanczos)
// Create a blurred version of the image.
img1 := imaging.Blur(src, 5)
// Create a grayscale version of the image with higher contrast and sharpness.
img2 := imaging.Grayscale(src)
img2 = imaging.AdjustContrast(img2, 20)
img2 = imaging.Sharpen(img2, 2)
// Create an inverted version of the image.
img3 := imaging.Invert(src)
// Create an embossed version of the image using a convolution filter.
img4 := imaging.Convolve3x3(
src,
[9]float64{
-1, -1, 0,
-1, 1, 1,
0, 1, 1,
},
nil,
)
// Create a new image and paste the four produced images into it.
dst := imaging.New(400, 400, color.NRGBA{0, 0, 0, 0})
dst = imaging.Paste(dst, img1, image.Pt(0, 0))
dst = imaging.Paste(dst, img2, image.Pt(0, 200))
dst = imaging.Paste(dst, img3, image.Pt(200, 0))
dst = imaging.Paste(dst, img4, image.Pt(200, 200))
// Save the resulting image as JPEG.
err = imaging.Save(dst, "testdata/out_example.jpg")
if err != nil {
log.Fatalf("failed to save image: %v", err)
}
}
```
Output:
![dstImage](testdata/out_example.jpg)
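The README above notes that custom filters can be created using the ResampleFilter struct. As a rough sketch (assuming the `Support`/`Kernel` fields that `resize.go` further down consumes; the file paths are illustrative), a hand-rolled triangle filter might look like this:

```go
// A hand-rolled triangle (bilinear-style) filter built as a custom ResampleFilter.
package main

import (
	"log"
	"math"

	"github.com/disintegration/imaging"
)

func main() {
	triangle := imaging.ResampleFilter{
		Support: 1.0, // kernel radius in source pixels, scaled by the resize factor
		Kernel: func(x float64) float64 {
			x = math.Abs(x)
			if x < 1.0 {
				return 1.0 - x
			}
			return 0
		},
	}

	src, err := imaging.Open("testdata/flowers.png")
	if err != nil {
		log.Fatal(err)
	}
	dst := imaging.Resize(src, 200, 0, triangle)
	if err := imaging.Save(dst, "out_custom_filter.png"); err != nil {
		log.Fatal(err)
	}
}
```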

vendor/github.com/disintegration/imaging/adjust.go generated vendored Normal file

@ -0,0 +1,253 @@
package imaging
import (
"image"
"image/color"
"math"
)
// Grayscale produces a grayscale version of the image.
func Grayscale(img image.Image) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
parallel(0, src.h, func(ys <-chan int) {
for y := range ys {
i := y * dst.Stride
src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
for x := 0; x < src.w; x++ {
d := dst.Pix[i : i+3 : i+3]
r := d[0]
g := d[1]
b := d[2]
f := 0.299*float64(r) + 0.587*float64(g) + 0.114*float64(b)
y := uint8(f + 0.5)
d[0] = y
d[1] = y
d[2] = y
i += 4
}
}
})
return dst
}
// Invert produces an inverted (negated) version of the image.
func Invert(img image.Image) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
parallel(0, src.h, func(ys <-chan int) {
for y := range ys {
i := y * dst.Stride
src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
for x := 0; x < src.w; x++ {
d := dst.Pix[i : i+3 : i+3]
d[0] = 255 - d[0]
d[1] = 255 - d[1]
d[2] = 255 - d[2]
i += 4
}
}
})
return dst
}
// AdjustSaturation changes the saturation of the image using the percentage parameter and returns the adjusted image.
// The percentage must be in the range (-100, 100).
// The percentage = 0 gives the original image.
// The percentage = 100 gives the image with the saturation value doubled for each pixel.
// The percentage = -100 gives the image with the saturation value zeroed for each pixel (grayscale).
//
// Examples:
// dstImage = imaging.AdjustSaturation(srcImage, 25) // Increase image saturation by 25%.
// dstImage = imaging.AdjustSaturation(srcImage, -10) // Decrease image saturation by 10%.
//
func AdjustSaturation(img image.Image, percentage float64) *image.NRGBA {
percentage = math.Min(math.Max(percentage, -100), 100)
multiplier := 1 + percentage/100
return AdjustFunc(img, func(c color.NRGBA) color.NRGBA {
h, s, l := rgbToHSL(c.R, c.G, c.B)
s *= multiplier
if s > 1 {
s = 1
}
r, g, b := hslToRGB(h, s, l)
return color.NRGBA{r, g, b, c.A}
})
}
// AdjustContrast changes the contrast of the image using the percentage parameter and returns the adjusted image.
// The percentage must be in range (-100, 100). The percentage = 0 gives the original image.
// The percentage = -100 gives solid gray image.
//
// Examples:
//
// dstImage = imaging.AdjustContrast(srcImage, -10) // Decrease image contrast by 10%.
// dstImage = imaging.AdjustContrast(srcImage, 20) // Increase image contrast by 20%.
//
func AdjustContrast(img image.Image, percentage float64) *image.NRGBA {
percentage = math.Min(math.Max(percentage, -100.0), 100.0)
lut := make([]uint8, 256)
v := (100.0 + percentage) / 100.0
for i := 0; i < 256; i++ {
switch {
case 0 <= v && v <= 1:
lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*v) * 255.0)
case 1 < v && v < 2:
lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*(1/(2.0-v))) * 255.0)
default:
lut[i] = uint8(float64(i)/255.0+0.5) * 255
}
}
return adjustLUT(img, lut)
}
// AdjustBrightness changes the brightness of the image using the percentage parameter and returns the adjusted image.
// The percentage must be in range (-100, 100). The percentage = 0 gives the original image.
// The percentage = -100 gives solid black image. The percentage = 100 gives solid white image.
//
// Examples:
//
// dstImage = imaging.AdjustBrightness(srcImage, -15) // Decrease image brightness by 15%.
// dstImage = imaging.AdjustBrightness(srcImage, 10) // Increase image brightness by 10%.
//
func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA {
percentage = math.Min(math.Max(percentage, -100.0), 100.0)
lut := make([]uint8, 256)
shift := 255.0 * percentage / 100.0
for i := 0; i < 256; i++ {
lut[i] = clamp(float64(i) + shift)
}
return adjustLUT(img, lut)
}
// AdjustGamma performs a gamma correction on the image and returns the adjusted image.
// Gamma parameter must be positive. Gamma = 1.0 gives the original image.
// Gamma less than 1.0 darkens the image and gamma greater than 1.0 lightens it.
//
// Example:
//
// dstImage = imaging.AdjustGamma(srcImage, 0.7)
//
func AdjustGamma(img image.Image, gamma float64) *image.NRGBA {
e := 1.0 / math.Max(gamma, 0.0001)
lut := make([]uint8, 256)
for i := 0; i < 256; i++ {
lut[i] = clamp(math.Pow(float64(i)/255.0, e) * 255.0)
}
return adjustLUT(img, lut)
}
// AdjustSigmoid changes the contrast of the image using a sigmoidal function and returns the adjusted image.
// It's a non-linear contrast change useful for photo adjustments as it preserves highlight and shadow detail.
// The midpoint parameter is the midpoint of contrast that must be between 0 and 1, typically 0.5.
// The factor parameter indicates how much to increase or decrease the contrast, typically in range (-10, 10).
// If the factor parameter is positive the image contrast is increased otherwise the contrast is decreased.
//
// Examples:
//
// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, 3.0) // Increase the contrast.
// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, -3.0) // Decrease the contrast.
//
func AdjustSigmoid(img image.Image, midpoint, factor float64) *image.NRGBA {
if factor == 0 {
return Clone(img)
}
lut := make([]uint8, 256)
a := math.Min(math.Max(midpoint, 0.0), 1.0)
b := math.Abs(factor)
sig0 := sigmoid(a, b, 0)
sig1 := sigmoid(a, b, 1)
e := 1.0e-6
if factor > 0 {
for i := 0; i < 256; i++ {
x := float64(i) / 255.0
sigX := sigmoid(a, b, x)
f := (sigX - sig0) / (sig1 - sig0)
lut[i] = clamp(f * 255.0)
}
} else {
for i := 0; i < 256; i++ {
x := float64(i) / 255.0
arg := math.Min(math.Max((sig1-sig0)*x+sig0, e), 1.0-e)
f := a - math.Log(1.0/arg-1.0)/b
lut[i] = clamp(f * 255.0)
}
}
return adjustLUT(img, lut)
}
func sigmoid(a, b, x float64) float64 {
return 1 / (1 + math.Exp(b*(a-x)))
}
// adjustLUT applies the given lookup table to the colors of the image.
func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
lut = lut[0:256]
parallel(0, src.h, func(ys <-chan int) {
for y := range ys {
i := y * dst.Stride
src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
for x := 0; x < src.w; x++ {
d := dst.Pix[i : i+3 : i+3]
d[0] = lut[d[0]]
d[1] = lut[d[1]]
d[2] = lut[d[2]]
i += 4
}
}
})
return dst
}
// AdjustFunc applies the fn function to each pixel of the img image and returns the adjusted image.
//
// Example:
//
// dstImage = imaging.AdjustFunc(
// srcImage,
// func(c color.NRGBA) color.NRGBA {
// // Shift the red channel by 16.
// r := int(c.R) + 16
// if r > 255 {
// r = 255
// }
// return color.NRGBA{uint8(r), c.G, c.B, c.A}
// }
// )
//
func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
parallel(0, src.h, func(ys <-chan int) {
for y := range ys {
i := y * dst.Stride
src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
for x := 0; x < src.w; x++ {
d := dst.Pix[i : i+4 : i+4]
r := d[0]
g := d[1]
b := d[2]
a := d[3]
c := fn(color.NRGBA{r, g, b, a})
d[0] = c.R
d[1] = c.G
d[2] = c.B
d[3] = c.A
i += 4
}
}
})
return dst
}
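As a usage sketch of the adjustment helpers defined in this file (the filenames and parameter values are illustrative only), a typical pipeline simply chains several of them, since each returns a fresh `*image.NRGBA`:

```go
// Sketch: chaining the Adjust* helpers shown above into one pipeline.
package main

import (
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	src, err := imaging.Open("photo.jpg", imaging.AutoOrientation(true))
	if err != nil {
		log.Fatal(err)
	}

	// Each call returns a new image, so the steps compose freely.
	out := imaging.AdjustGamma(src, 1.1)       // lighten slightly (gamma > 1)
	out = imaging.AdjustContrast(out, 10)      // +10% contrast, range (-100, 100)
	out = imaging.AdjustSaturation(out, 15)    // +15% saturation
	out = imaging.AdjustSigmoid(out, 0.5, 3.0) // gentle S-curve around midpoint 0.5

	if err := imaging.Save(out, "photo_adjusted.jpg", imaging.JPEGQuality(90)); err != nil {
		log.Fatal(err)
	}
}
```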

vendor/github.com/disintegration/imaging/convolution.go generated vendored Normal file

@ -0,0 +1,148 @@
package imaging
import (
"image"
)
// ConvolveOptions are convolution parameters.
type ConvolveOptions struct {
// If Normalize is true the kernel is normalized before convolution.
Normalize bool
// If Abs is true the absolute value of each color channel is taken after convolution.
Abs bool
// Bias is added to each color channel value after convolution.
Bias int
}
// Convolve3x3 convolves the image with the specified 3x3 convolution kernel.
// Default parameters are used if a nil *ConvolveOptions is passed.
func Convolve3x3(img image.Image, kernel [9]float64, options *ConvolveOptions) *image.NRGBA {
return convolve(img, kernel[:], options)
}
// Convolve5x5 convolves the image with the specified 5x5 convolution kernel.
// Default parameters are used if a nil *ConvolveOptions is passed.
func Convolve5x5(img image.Image, kernel [25]float64, options *ConvolveOptions) *image.NRGBA {
return convolve(img, kernel[:], options)
}
func convolve(img image.Image, kernel []float64, options *ConvolveOptions) *image.NRGBA {
src := toNRGBA(img)
w := src.Bounds().Max.X
h := src.Bounds().Max.Y
dst := image.NewNRGBA(image.Rect(0, 0, w, h))
if w < 1 || h < 1 {
return dst
}
if options == nil {
options = &ConvolveOptions{}
}
if options.Normalize {
normalizeKernel(kernel)
}
type coef struct {
x, y int
k float64
}
var coefs []coef
var m int
switch len(kernel) {
case 9:
m = 1
case 25:
m = 2
}
i := 0
for y := -m; y <= m; y++ {
for x := -m; x <= m; x++ {
if kernel[i] != 0 {
coefs = append(coefs, coef{x: x, y: y, k: kernel[i]})
}
i++
}
}
parallel(0, h, func(ys <-chan int) {
for y := range ys {
for x := 0; x < w; x++ {
var r, g, b float64
for _, c := range coefs {
ix := x + c.x
if ix < 0 {
ix = 0
} else if ix >= w {
ix = w - 1
}
iy := y + c.y
if iy < 0 {
iy = 0
} else if iy >= h {
iy = h - 1
}
off := iy*src.Stride + ix*4
s := src.Pix[off : off+3 : off+3]
r += float64(s[0]) * c.k
g += float64(s[1]) * c.k
b += float64(s[2]) * c.k
}
if options.Abs {
if r < 0 {
r = -r
}
if g < 0 {
g = -g
}
if b < 0 {
b = -b
}
}
if options.Bias != 0 {
r += float64(options.Bias)
g += float64(options.Bias)
b += float64(options.Bias)
}
srcOff := y*src.Stride + x*4
dstOff := y*dst.Stride + x*4
d := dst.Pix[dstOff : dstOff+4 : dstOff+4]
d[0] = clamp(r)
d[1] = clamp(g)
d[2] = clamp(b)
d[3] = src.Pix[srcOff+3]
}
}
})
return dst
}
func normalizeKernel(kernel []float64) {
var sum, sumpos float64
for i := range kernel {
sum += kernel[i]
if kernel[i] > 0 {
sumpos += kernel[i]
}
}
if sum != 0 {
for i := range kernel {
kernel[i] /= sum
}
} else if sumpos != 0 {
for i := range kernel {
kernel[i] /= sumpos
}
}
}
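To illustrate `Convolve3x3` together with the `Abs` option (useful because convolution results can be negative), here is a sketch of a Sobel-style horizontal edge detector; the input and output paths are illustrative:

```go
// Sketch: horizontal edge detection with Convolve3x3 and ConvolveOptions{Abs: true}.
package main

import (
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	src, err := imaging.Open("testdata/branches.png")
	if err != nil {
		log.Fatal(err)
	}

	// Grayscale first so all three channels carry the same edge response.
	edges := imaging.Convolve3x3(
		imaging.Grayscale(src),
		[9]float64{
			-1, 0, 1,
			-2, 0, 2,
			-1, 0, 1,
		},
		&imaging.ConvolveOptions{Abs: true},
	)

	if err := imaging.Save(edges, "out_edges.png"); err != nil {
		log.Fatal(err)
	}
}
```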

vendor/github.com/disintegration/imaging/doc.go generated vendored Normal file

@ -0,0 +1,7 @@
/*
Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.).
All the image processing functions provided by the package accept any image type that implements image.Image interface
as an input, and return a new image of *image.NRGBA type (32bit RGBA colors, non-premultiplied alpha).
*/
package imaging

vendor/github.com/disintegration/imaging/effects.go generated vendored Normal file

@ -0,0 +1,169 @@
package imaging
import (
"image"
"math"
)
func gaussianBlurKernel(x, sigma float64) float64 {
return math.Exp(-(x*x)/(2*sigma*sigma)) / (sigma * math.Sqrt(2*math.Pi))
}
// Blur produces a blurred version of the image using a Gaussian function.
// Sigma parameter must be positive and indicates how much the image will be blurred.
//
// Example:
//
// dstImage := imaging.Blur(srcImage, 3.5)
//
func Blur(img image.Image, sigma float64) *image.NRGBA {
if sigma <= 0 {
return Clone(img)
}
radius := int(math.Ceil(sigma * 3.0))
kernel := make([]float64, radius+1)
for i := 0; i <= radius; i++ {
kernel[i] = gaussianBlurKernel(float64(i), sigma)
}
return blurVertical(blurHorizontal(img, kernel), kernel)
}
func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
radius := len(kernel) - 1
parallel(0, src.h, func(ys <-chan int) {
scanLine := make([]uint8, src.w*4)
scanLineF := make([]float64, len(scanLine))
for y := range ys {
src.scan(0, y, src.w, y+1, scanLine)
for i, v := range scanLine {
scanLineF[i] = float64(v)
}
for x := 0; x < src.w; x++ {
min := x - radius
if min < 0 {
min = 0
}
max := x + radius
if max > src.w-1 {
max = src.w - 1
}
var r, g, b, a, wsum float64
for ix := min; ix <= max; ix++ {
i := ix * 4
weight := kernel[absint(x-ix)]
wsum += weight
s := scanLineF[i : i+4 : i+4]
wa := s[3] * weight
r += s[0] * wa
g += s[1] * wa
b += s[2] * wa
a += wa
}
if a != 0 {
aInv := 1 / a
j := y*dst.Stride + x*4
d := dst.Pix[j : j+4 : j+4]
d[0] = clamp(r * aInv)
d[1] = clamp(g * aInv)
d[2] = clamp(b * aInv)
d[3] = clamp(a / wsum)
}
}
}
})
return dst
}
func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
radius := len(kernel) - 1
parallel(0, src.w, func(xs <-chan int) {
scanLine := make([]uint8, src.h*4)
scanLineF := make([]float64, len(scanLine))
for x := range xs {
src.scan(x, 0, x+1, src.h, scanLine)
for i, v := range scanLine {
scanLineF[i] = float64(v)
}
for y := 0; y < src.h; y++ {
min := y - radius
if min < 0 {
min = 0
}
max := y + radius
if max > src.h-1 {
max = src.h - 1
}
var r, g, b, a, wsum float64
for iy := min; iy <= max; iy++ {
i := iy * 4
weight := kernel[absint(y-iy)]
wsum += weight
s := scanLineF[i : i+4 : i+4]
wa := s[3] * weight
r += s[0] * wa
g += s[1] * wa
b += s[2] * wa
a += wa
}
if a != 0 {
aInv := 1 / a
j := y*dst.Stride + x*4
d := dst.Pix[j : j+4 : j+4]
d[0] = clamp(r * aInv)
d[1] = clamp(g * aInv)
d[2] = clamp(b * aInv)
d[3] = clamp(a / wsum)
}
}
}
})
return dst
}
// Sharpen produces a sharpened version of the image.
// Sigma parameter must be positive and indicates how much the image will be sharpened.
//
// Example:
//
// dstImage := imaging.Sharpen(srcImage, 3.5)
//
func Sharpen(img image.Image, sigma float64) *image.NRGBA {
if sigma <= 0 {
return Clone(img)
}
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
blurred := Blur(img, sigma)
parallel(0, src.h, func(ys <-chan int) {
scanLine := make([]uint8, src.w*4)
for y := range ys {
src.scan(0, y, src.w, y+1, scanLine)
j := y * dst.Stride
for i := 0; i < src.w*4; i++ {
val := int(scanLine[i])<<1 - int(blurred.Pix[j])
if val < 0 {
val = 0
} else if val > 0xff {
val = 0xff
}
dst.Pix[j] = uint8(val)
j++
}
}
})
return dst
}

vendor/github.com/disintegration/imaging/histogram.go generated vendored Normal file

@ -0,0 +1,52 @@
package imaging
import (
"image"
"sync"
)
// Histogram returns a normalized histogram of an image.
//
// Resulting histogram is represented as an array of 256 floats, where
// histogram[i] is a probability of a pixel being of a particular luminance i.
func Histogram(img image.Image) [256]float64 {
var mu sync.Mutex
var histogram [256]float64
var total float64
src := newScanner(img)
if src.w == 0 || src.h == 0 {
return histogram
}
parallel(0, src.h, func(ys <-chan int) {
var tmpHistogram [256]float64
var tmpTotal float64
scanLine := make([]uint8, src.w*4)
for y := range ys {
src.scan(0, y, src.w, y+1, scanLine)
i := 0
for x := 0; x < src.w; x++ {
s := scanLine[i : i+3 : i+3]
r := s[0]
g := s[1]
b := s[2]
y := 0.299*float32(r) + 0.587*float32(g) + 0.114*float32(b)
tmpHistogram[int(y+0.5)]++
tmpTotal++
i += 4
}
}
mu.Lock()
for i := 0; i < 256; i++ {
histogram[i] += tmpHistogram[i]
}
total += tmpTotal
mu.Unlock()
})
for i := 0; i < 256; i++ {
histogram[i] = histogram[i] / total
}
return histogram
}
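A short sketch of how the normalized histogram might be used, for example to estimate the median luminance of an image (the filename is illustrative):

```go
// Sketch: walk the cumulative histogram to find the median luminance.
package main

import (
	"fmt"
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	img, err := imaging.Open("photo.jpg")
	if err != nil {
		log.Fatal(err)
	}

	hist := imaging.Histogram(img) // [256]float64, entries sum to 1 for a non-empty image

	// Accumulate probabilities until half the pixels are covered.
	var cumulative float64
	median := 0
	for i, p := range hist {
		cumulative += p
		if cumulative >= 0.5 {
			median = i
			break
		}
	}
	fmt.Printf("median luminance: %d/255\n", median)
}
```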

vendor/github.com/disintegration/imaging/io.go generated vendored Normal file

@ -0,0 +1,444 @@
package imaging
import (
"encoding/binary"
"errors"
"image"
"image/draw"
"image/gif"
"image/jpeg"
"image/png"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"golang.org/x/image/bmp"
"golang.org/x/image/tiff"
)
type fileSystem interface {
Create(string) (io.WriteCloser, error)
Open(string) (io.ReadCloser, error)
}
type localFS struct{}
func (localFS) Create(name string) (io.WriteCloser, error) { return os.Create(name) }
func (localFS) Open(name string) (io.ReadCloser, error) { return os.Open(name) }
var fs fileSystem = localFS{}
type decodeConfig struct {
autoOrientation bool
}
var defaultDecodeConfig = decodeConfig{
autoOrientation: false,
}
// DecodeOption sets an optional parameter for the Decode and Open functions.
type DecodeOption func(*decodeConfig)
// AutoOrientation returns a DecodeOption that sets the auto-orientation mode.
// If auto-orientation is enabled, the image will be transformed after decoding
// according to the EXIF orientation tag (if present). By default it's disabled.
func AutoOrientation(enabled bool) DecodeOption {
return func(c *decodeConfig) {
c.autoOrientation = enabled
}
}
// Decode reads an image from r.
func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
cfg := defaultDecodeConfig
for _, option := range opts {
option(&cfg)
}
if !cfg.autoOrientation {
img, _, err := image.Decode(r)
return img, err
}
var orient orientation
pr, pw := io.Pipe()
r = io.TeeReader(r, pw)
done := make(chan struct{})
go func() {
defer close(done)
orient = readOrientation(pr)
io.Copy(ioutil.Discard, pr)
}()
img, _, err := image.Decode(r)
pw.Close()
<-done
if err != nil {
return nil, err
}
return fixOrientation(img, orient), nil
}
// Open loads an image from file.
//
// Examples:
//
// // Load an image from file.
// img, err := imaging.Open("test.jpg")
//
// // Load an image and transform it depending on the EXIF orientation tag (if present).
// img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true))
//
func Open(filename string, opts ...DecodeOption) (image.Image, error) {
file, err := fs.Open(filename)
if err != nil {
return nil, err
}
defer file.Close()
return Decode(file, opts...)
}
// Format is an image file format.
type Format int
// Image file formats.
const (
JPEG Format = iota
PNG
GIF
TIFF
BMP
)
var formatExts = map[string]Format{
"jpg": JPEG,
"jpeg": JPEG,
"png": PNG,
"gif": GIF,
"tif": TIFF,
"tiff": TIFF,
"bmp": BMP,
}
var formatNames = map[Format]string{
JPEG: "JPEG",
PNG: "PNG",
GIF: "GIF",
TIFF: "TIFF",
BMP: "BMP",
}
func (f Format) String() string {
return formatNames[f]
}
// ErrUnsupportedFormat means the given image format is not supported.
var ErrUnsupportedFormat = errors.New("imaging: unsupported image format")
// FormatFromExtension parses image format from filename extension:
// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
func FormatFromExtension(ext string) (Format, error) {
if f, ok := formatExts[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok {
return f, nil
}
return -1, ErrUnsupportedFormat
}
// FormatFromFilename parses image format from filename:
// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
func FormatFromFilename(filename string) (Format, error) {
ext := filepath.Ext(filename)
return FormatFromExtension(ext)
}
type encodeConfig struct {
jpegQuality int
gifNumColors int
gifQuantizer draw.Quantizer
gifDrawer draw.Drawer
pngCompressionLevel png.CompressionLevel
}
var defaultEncodeConfig = encodeConfig{
jpegQuality: 95,
gifNumColors: 256,
gifQuantizer: nil,
gifDrawer: nil,
pngCompressionLevel: png.DefaultCompression,
}
// EncodeOption sets an optional parameter for the Encode and Save functions.
type EncodeOption func(*encodeConfig)
// JPEGQuality returns an EncodeOption that sets the output JPEG quality.
// Quality ranges from 1 to 100 inclusive, higher is better. Default is 95.
func JPEGQuality(quality int) EncodeOption {
return func(c *encodeConfig) {
c.jpegQuality = quality
}
}
// GIFNumColors returns an EncodeOption that sets the maximum number of colors
// used in the GIF-encoded image. It ranges from 1 to 256. Default is 256.
func GIFNumColors(numColors int) EncodeOption {
return func(c *encodeConfig) {
c.gifNumColors = numColors
}
}
// GIFQuantizer returns an EncodeOption that sets the quantizer that is used to produce
// a palette of the GIF-encoded image.
func GIFQuantizer(quantizer draw.Quantizer) EncodeOption {
return func(c *encodeConfig) {
c.gifQuantizer = quantizer
}
}
// GIFDrawer returns an EncodeOption that sets the drawer that is used to convert
// the source image to the desired palette of the GIF-encoded image.
func GIFDrawer(drawer draw.Drawer) EncodeOption {
return func(c *encodeConfig) {
c.gifDrawer = drawer
}
}
// PNGCompressionLevel returns an EncodeOption that sets the compression level
// of the PNG-encoded image. Default is png.DefaultCompression.
func PNGCompressionLevel(level png.CompressionLevel) EncodeOption {
return func(c *encodeConfig) {
c.pngCompressionLevel = level
}
}
// Encode writes the image img to w in the specified format (JPEG, PNG, GIF, TIFF or BMP).
func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) error {
cfg := defaultEncodeConfig
for _, option := range opts {
option(&cfg)
}
switch format {
case JPEG:
if nrgba, ok := img.(*image.NRGBA); ok && nrgba.Opaque() {
rgba := &image.RGBA{
Pix: nrgba.Pix,
Stride: nrgba.Stride,
Rect: nrgba.Rect,
}
return jpeg.Encode(w, rgba, &jpeg.Options{Quality: cfg.jpegQuality})
}
return jpeg.Encode(w, img, &jpeg.Options{Quality: cfg.jpegQuality})
case PNG:
encoder := png.Encoder{CompressionLevel: cfg.pngCompressionLevel}
return encoder.Encode(w, img)
case GIF:
return gif.Encode(w, img, &gif.Options{
NumColors: cfg.gifNumColors,
Quantizer: cfg.gifQuantizer,
Drawer: cfg.gifDrawer,
})
case TIFF:
return tiff.Encode(w, img, &tiff.Options{Compression: tiff.Deflate, Predictor: true})
case BMP:
return bmp.Encode(w, img)
}
return ErrUnsupportedFormat
}
// Save saves the image to file with the specified filename.
// The format is determined from the filename extension:
// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
//
// Examples:
//
// // Save the image as PNG.
// err := imaging.Save(img, "out.png")
//
// // Save the image as JPEG with optional quality parameter set to 80.
// err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80))
//
func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
f, err := FormatFromFilename(filename)
if err != nil {
return err
}
file, err := fs.Create(filename)
if err != nil {
return err
}
err = Encode(file, img, f, opts...)
errc := file.Close()
if err == nil {
err = errc
}
return err
}
// orientation is an EXIF flag that specifies the transformation
// that should be applied to image to display it correctly.
type orientation int
const (
orientationUnspecified = 0
orientationNormal = 1
orientationFlipH = 2
orientationRotate180 = 3
orientationFlipV = 4
orientationTranspose = 5
orientationRotate270 = 6
orientationTransverse = 7
orientationRotate90 = 8
)
// readOrientation tries to read the orientation EXIF flag from image data in r.
// If the EXIF data block is not found or the orientation flag is not found
or any other error occurs while reading the data, it returns the
// orientationUnspecified (0) value.
func readOrientation(r io.Reader) orientation {
const (
markerSOI = 0xffd8
markerAPP1 = 0xffe1
exifHeader = 0x45786966
byteOrderBE = 0x4d4d
byteOrderLE = 0x4949
orientationTag = 0x0112
)
// Check if JPEG SOI marker is present.
var soi uint16
if err := binary.Read(r, binary.BigEndian, &soi); err != nil {
return orientationUnspecified
}
if soi != markerSOI {
return orientationUnspecified // Missing JPEG SOI marker.
}
// Find JPEG APP1 marker.
for {
var marker, size uint16
if err := binary.Read(r, binary.BigEndian, &marker); err != nil {
return orientationUnspecified
}
if err := binary.Read(r, binary.BigEndian, &size); err != nil {
return orientationUnspecified
}
if marker>>8 != 0xff {
return orientationUnspecified // Invalid JPEG marker.
}
if marker == markerAPP1 {
break
}
if size < 2 {
return orientationUnspecified // Invalid block size.
}
if _, err := io.CopyN(ioutil.Discard, r, int64(size-2)); err != nil {
return orientationUnspecified
}
}
// Check if EXIF header is present.
var header uint32
if err := binary.Read(r, binary.BigEndian, &header); err != nil {
return orientationUnspecified
}
if header != exifHeader {
return orientationUnspecified
}
if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil {
return orientationUnspecified
}
// Read byte order information.
var (
byteOrderTag uint16
byteOrder binary.ByteOrder
)
if err := binary.Read(r, binary.BigEndian, &byteOrderTag); err != nil {
return orientationUnspecified
}
switch byteOrderTag {
case byteOrderBE:
byteOrder = binary.BigEndian
case byteOrderLE:
byteOrder = binary.LittleEndian
default:
return orientationUnspecified // Invalid byte order flag.
}
if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil {
return orientationUnspecified
}
// Skip the EXIF offset.
var offset uint32
if err := binary.Read(r, byteOrder, &offset); err != nil {
return orientationUnspecified
}
if offset < 8 {
return orientationUnspecified // Invalid offset value.
}
if _, err := io.CopyN(ioutil.Discard, r, int64(offset-8)); err != nil {
return orientationUnspecified
}
// Read the number of tags.
var numTags uint16
if err := binary.Read(r, byteOrder, &numTags); err != nil {
return orientationUnspecified
}
// Find the orientation tag.
for i := 0; i < int(numTags); i++ {
var tag uint16
if err := binary.Read(r, byteOrder, &tag); err != nil {
return orientationUnspecified
}
if tag != orientationTag {
if _, err := io.CopyN(ioutil.Discard, r, 10); err != nil {
return orientationUnspecified
}
continue
}
if _, err := io.CopyN(ioutil.Discard, r, 6); err != nil {
return orientationUnspecified
}
var val uint16
if err := binary.Read(r, byteOrder, &val); err != nil {
return orientationUnspecified
}
if val < 1 || val > 8 {
return orientationUnspecified // Invalid tag value.
}
return orientation(val)
}
return orientationUnspecified // Missing orientation tag.
}
// fixOrientation applies a transform to img corresponding to the given orientation flag.
func fixOrientation(img image.Image, o orientation) image.Image {
switch o {
case orientationNormal:
case orientationFlipH:
img = FlipH(img)
case orientationFlipV:
img = FlipV(img)
case orientationRotate90:
img = Rotate90(img)
case orientationRotate180:
img = Rotate180(img)
case orientationRotate270:
img = Rotate270(img)
case orientationTranspose:
img = Transpose(img)
case orientationTransverse:
img = Transverse(img)
}
return img
}
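As a small sketch of the encode/decode helpers and their functional options (the test image, its colour, and the quality value are illustrative; `imaging.New` is the constructor used in the README example above):

```go
// Sketch: in-memory encode/decode round trip using Format constants and options.
package main

import (
	"bytes"
	"fmt"
	"image/color"
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	// Build a small solid-colour test image.
	src := imaging.New(64, 64, color.NRGBA{R: 40, G: 120, B: 200, A: 255})

	// Encode to an in-memory JPEG at quality 80.
	var buf bytes.Buffer
	if err := imaging.Encode(&buf, src, imaging.JPEG, imaging.JPEGQuality(80)); err != nil {
		log.Fatal(err)
	}

	// Decode it back, applying EXIF auto-orientation (a no-op for this image).
	round, err := imaging.Decode(&buf, imaging.AutoOrientation(true))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("encoded bytes:", buf.Len(), "decoded bounds:", round.Bounds())
}
```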

vendor/github.com/disintegration/imaging/resize.go generated vendored Normal file

@ -0,0 +1,595 @@
package imaging
import (
"image"
"math"
)
type indexWeight struct {
index int
weight float64
}
func precomputeWeights(dstSize, srcSize int, filter ResampleFilter) [][]indexWeight {
du := float64(srcSize) / float64(dstSize)
scale := du
if scale < 1.0 {
scale = 1.0
}
ru := math.Ceil(scale * filter.Support)
out := make([][]indexWeight, dstSize)
tmp := make([]indexWeight, 0, dstSize*int(ru+2)*2)
for v := 0; v < dstSize; v++ {
fu := (float64(v)+0.5)*du - 0.5
begin := int(math.Ceil(fu - ru))
if begin < 0 {
begin = 0
}
end := int(math.Floor(fu + ru))
if end > srcSize-1 {
end = srcSize - 1
}
var sum float64
for u := begin; u <= end; u++ {
w := filter.Kernel((float64(u) - fu) / scale)
if w != 0 {
sum += w
tmp = append(tmp, indexWeight{index: u, weight: w})
}
}
if sum != 0 {
for i := range tmp {
tmp[i].weight /= sum
}
}
out[v] = tmp
tmp = tmp[len(tmp):]
}
return out
}
// Resize resizes the image to the specified width and height using the specified resampling
// filter and returns the transformed image. If one of width or height is 0, the image aspect
// ratio is preserved.
//
// Example:
//
// dstImage := imaging.Resize(srcImage, 800, 600, imaging.Lanczos)
//
func Resize(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
dstW, dstH := width, height
if dstW < 0 || dstH < 0 {
return &image.NRGBA{}
}
if dstW == 0 && dstH == 0 {
return &image.NRGBA{}
}
srcW := img.Bounds().Dx()
srcH := img.Bounds().Dy()
if srcW <= 0 || srcH <= 0 {
return &image.NRGBA{}
}
// If new width or height is 0 then preserve aspect ratio, minimum 1px.
if dstW == 0 {
tmpW := float64(dstH) * float64(srcW) / float64(srcH)
dstW = int(math.Max(1.0, math.Floor(tmpW+0.5)))
}
if dstH == 0 {
tmpH := float64(dstW) * float64(srcH) / float64(srcW)
dstH = int(math.Max(1.0, math.Floor(tmpH+0.5)))
}
if filter.Support <= 0 {
// Nearest-neighbor special case.
return resizeNearest(img, dstW, dstH)
}
if srcW != dstW && srcH != dstH {
return resizeVertical(resizeHorizontal(img, dstW, filter), dstH, filter)
}
if srcW != dstW {
return resizeHorizontal(img, dstW, filter)
}
if srcH != dstH {
return resizeVertical(img, dstH, filter)
}
return Clone(img)
}
func resizeHorizontal(img image.Image, width int, filter ResampleFilter) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, width, src.h))
weights := precomputeWeights(width, src.w, filter)
parallel(0, src.h, func(ys <-chan int) {
scanLine := make([]uint8, src.w*4)
for y := range ys {
src.scan(0, y, src.w, y+1, scanLine)
j0 := y * dst.Stride
for x := range weights {
var r, g, b, a float64
for _, w := range weights[x] {
i := w.index * 4
s := scanLine[i : i+4 : i+4]
aw := float64(s[3]) * w.weight
r += float64(s[0]) * aw
g += float64(s[1]) * aw
b += float64(s[2]) * aw
a += aw
}
if a != 0 {
aInv := 1 / a
j := j0 + x*4
d := dst.Pix[j : j+4 : j+4]
d[0] = clamp(r * aInv)
d[1] = clamp(g * aInv)
d[2] = clamp(b * aInv)
d[3] = clamp(a)
}
}
}
})
return dst
}
func resizeVertical(img image.Image, height int, filter ResampleFilter) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, height))
weights := precomputeWeights(height, src.h, filter)
parallel(0, src.w, func(xs <-chan int) {
scanLine := make([]uint8, src.h*4)
for x := range xs {
src.scan(x, 0, x+1, src.h, scanLine)
for y := range weights {
var r, g, b, a float64
for _, w := range weights[y] {
i := w.index * 4
s := scanLine[i : i+4 : i+4]
aw := float64(s[3]) * w.weight
r += float64(s[0]) * aw
g += float64(s[1]) * aw
b += float64(s[2]) * aw
a += aw
}
if a != 0 {
aInv := 1 / a
j := y*dst.Stride + x*4
d := dst.Pix[j : j+4 : j+4]
d[0] = clamp(r * aInv)
d[1] = clamp(g * aInv)
d[2] = clamp(b * aInv)
d[3] = clamp(a)
}
}
}
})
return dst
}
// resizeNearest is a fast nearest-neighbor resize, no filtering.
func resizeNearest(img image.Image, width, height int) *image.NRGBA {
dst := image.NewNRGBA(image.Rect(0, 0, width, height))
dx := float64(img.Bounds().Dx()) / float64(width)
dy := float64(img.Bounds().Dy()) / float64(height)
if dx > 1 && dy > 1 {
src := newScanner(img)
parallel(0, height, func(ys <-chan int) {
for y := range ys {
srcY := int((float64(y) + 0.5) * dy)
dstOff := y * dst.Stride
for x := 0; x < width; x++ {
srcX := int((float64(x) + 0.5) * dx)
src.scan(srcX, srcY, srcX+1, srcY+1, dst.Pix[dstOff:dstOff+4])
dstOff += 4
}
}
})
} else {
src := toNRGBA(img)
parallel(0, height, func(ys <-chan int) {
for y := range ys {
srcY := int((float64(y) + 0.5) * dy)
srcOff0 := srcY * src.Stride
dstOff := y * dst.Stride
for x := 0; x < width; x++ {
srcX := int((float64(x) + 0.5) * dx)
srcOff := srcOff0 + srcX*4
copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4])
dstOff += 4
}
}
})
}
return dst
}
// Fit scales down the image using the specified resample filter to fit the specified
// maximum width and height and returns the transformed image.
//
// Example:
//
// dstImage := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)
//
func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
maxW, maxH := width, height
if maxW <= 0 || maxH <= 0 {
return &image.NRGBA{}
}
srcBounds := img.Bounds()
srcW := srcBounds.Dx()
srcH := srcBounds.Dy()
if srcW <= 0 || srcH <= 0 {
return &image.NRGBA{}
}
if srcW <= maxW && srcH <= maxH {
return Clone(img)
}
srcAspectRatio := float64(srcW) / float64(srcH)
maxAspectRatio := float64(maxW) / float64(maxH)
var newW, newH int
if srcAspectRatio > maxAspectRatio {
newW = maxW
newH = int(float64(newW) / srcAspectRatio)
} else {
newH = maxH
newW = int(float64(newH) * srcAspectRatio)
}
return Resize(img, newW, newH, filter)
}
// Fill creates an image with the specified dimensions and fills it with the scaled source image.
// To achieve the correct aspect ratio without stretching, the source image will be cropped.
//
// Example:
//
// dstImage := imaging.Fill(srcImage, 800, 600, imaging.Center, imaging.Lanczos)
//
func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
dstW, dstH := width, height
if dstW <= 0 || dstH <= 0 {
return &image.NRGBA{}
}
srcBounds := img.Bounds()
srcW := srcBounds.Dx()
srcH := srcBounds.Dy()
if srcW <= 0 || srcH <= 0 {
return &image.NRGBA{}
}
if srcW == dstW && srcH == dstH {
return Clone(img)
}
if srcW >= 100 && srcH >= 100 {
return cropAndResize(img, dstW, dstH, anchor, filter)
}
return resizeAndCrop(img, dstW, dstH, anchor, filter)
}
// cropAndResize crops the image to the smallest possible size that has the required aspect ratio using
// the given anchor point, then scales it to the specified dimensions and returns the transformed image.
//
// This is generally faster than resizing first, but may result in inaccuracies when used on small source images.
func cropAndResize(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
dstW, dstH := width, height
srcBounds := img.Bounds()
srcW := srcBounds.Dx()
srcH := srcBounds.Dy()
srcAspectRatio := float64(srcW) / float64(srcH)
dstAspectRatio := float64(dstW) / float64(dstH)
var tmp *image.NRGBA
if srcAspectRatio < dstAspectRatio {
cropH := float64(srcW) * float64(dstH) / float64(dstW)
tmp = CropAnchor(img, srcW, int(math.Max(1, cropH)+0.5), anchor)
} else {
cropW := float64(srcH) * float64(dstW) / float64(dstH)
tmp = CropAnchor(img, int(math.Max(1, cropW)+0.5), srcH, anchor)
}
return Resize(tmp, dstW, dstH, filter)
}
// resizeAndCrop resizes the image to the smallest possible size that will cover the specified dimensions,
// crops the resized image to the specified dimensions using the given anchor point and returns
// the transformed image.
func resizeAndCrop(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
dstW, dstH := width, height
srcBounds := img.Bounds()
srcW := srcBounds.Dx()
srcH := srcBounds.Dy()
srcAspectRatio := float64(srcW) / float64(srcH)
dstAspectRatio := float64(dstW) / float64(dstH)
var tmp *image.NRGBA
if srcAspectRatio < dstAspectRatio {
tmp = Resize(img, dstW, 0, filter)
} else {
tmp = Resize(img, 0, dstH, filter)
}
return CropAnchor(tmp, dstW, dstH, anchor)
}
// Thumbnail scales the image up or down using the specified resample filter, crops it
// to the specified width and height, and returns the transformed image.
//
// Example:
//
// dstImage := imaging.Thumbnail(srcImage, 100, 100, imaging.Lanczos)
//
func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
return Fill(img, width, height, Center, filter)
}
// ResampleFilter specifies a resampling filter to be used for image resizing.
//
// General filter recommendations:
//
// - Lanczos
// A high-quality resampling filter for photographic images yielding sharp results.
//
// - CatmullRom
// A sharp cubic filter that is faster than the Lanczos filter while providing similar results.
//
// - MitchellNetravali
// A cubic filter that produces smoother results with fewer ringing artifacts than CatmullRom.
//
// - Linear
// Bilinear resampling filter, produces a smooth output. Faster than cubic filters.
//
// - Box
// Simple and fast averaging filter appropriate for downscaling.
// When upscaling it's similar to NearestNeighbor.
//
// - NearestNeighbor
// Fastest resampling filter, no antialiasing.
//
type ResampleFilter struct {
Support float64
Kernel func(float64) float64
}
// NearestNeighbor is a nearest-neighbor filter (no anti-aliasing).
var NearestNeighbor ResampleFilter
// Box filter (averaging pixels).
var Box ResampleFilter
// Linear filter.
var Linear ResampleFilter
// Hermite cubic spline filter (BC-spline; B=0; C=0).
var Hermite ResampleFilter
// MitchellNetravali is Mitchell-Netravali cubic filter (BC-spline; B=1/3; C=1/3).
var MitchellNetravali ResampleFilter
// CatmullRom is a Catmull-Rom (sharp cubic) filter (BC-spline; B=0; C=0.5).
var CatmullRom ResampleFilter
// BSpline is a smooth cubic filter (BC-spline; B=1; C=0).
var BSpline ResampleFilter
// Gaussian is a Gaussian blurring filter.
var Gaussian ResampleFilter
// Bartlett is a Bartlett-windowed sinc filter (3 lobes).
var Bartlett ResampleFilter
// Lanczos filter (3 lobes).
var Lanczos ResampleFilter
// Hann is a Hann-windowed sinc filter (3 lobes).
var Hann ResampleFilter
// Hamming is a Hamming-windowed sinc filter (3 lobes).
var Hamming ResampleFilter
// Blackman is a Blackman-windowed sinc filter (3 lobes).
var Blackman ResampleFilter
// Welch is a Welch-windowed sinc filter (parabolic window, 3 lobes).
var Welch ResampleFilter
// Cosine is a Cosine-windowed sinc filter (3 lobes).
var Cosine ResampleFilter
func bcspline(x, b, c float64) float64 {
var y float64
x = math.Abs(x)
if x < 1.0 {
y = ((12-9*b-6*c)*x*x*x + (-18+12*b+6*c)*x*x + (6 - 2*b)) / 6
} else if x < 2.0 {
y = ((-b-6*c)*x*x*x + (6*b+30*c)*x*x + (-12*b-48*c)*x + (8*b + 24*c)) / 6
}
return y
}
func sinc(x float64) float64 {
if x == 0 {
return 1
}
return math.Sin(math.Pi*x) / (math.Pi * x)
}
func init() {
NearestNeighbor = ResampleFilter{
Support: 0.0, // special case - not applying the filter
}
Box = ResampleFilter{
Support: 0.5,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x <= 0.5 {
return 1.0
}
return 0
},
}
Linear = ResampleFilter{
Support: 1.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 1.0 {
return 1.0 - x
}
return 0
},
}
Hermite = ResampleFilter{
Support: 1.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 1.0 {
return bcspline(x, 0.0, 0.0)
}
return 0
},
}
MitchellNetravali = ResampleFilter{
Support: 2.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 2.0 {
return bcspline(x, 1.0/3.0, 1.0/3.0)
}
return 0
},
}
CatmullRom = ResampleFilter{
Support: 2.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 2.0 {
return bcspline(x, 0.0, 0.5)
}
return 0
},
}
BSpline = ResampleFilter{
Support: 2.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 2.0 {
return bcspline(x, 1.0, 0.0)
}
return 0
},
}
Gaussian = ResampleFilter{
Support: 2.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 2.0 {
return math.Exp(-2 * x * x)
}
return 0
},
}
Bartlett = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * (3.0 - x) / 3.0
}
return 0
},
}
Lanczos = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * sinc(x/3.0)
}
return 0
},
}
Hann = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * (0.5 + 0.5*math.Cos(math.Pi*x/3.0))
}
return 0
},
}
Hamming = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * (0.54 + 0.46*math.Cos(math.Pi*x/3.0))
}
return 0
},
}
Blackman = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * (0.42 - 0.5*math.Cos(math.Pi*x/3.0+math.Pi) + 0.08*math.Cos(2.0*math.Pi*x/3.0))
}
return 0
},
}
Welch = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * (1.0 - (x * x / 9.0))
}
return 0
},
}
Cosine = ResampleFilter{
Support: 3.0,
Kernel: func(x float64) float64 {
x = math.Abs(x)
if x < 3.0 {
return sinc(x) * math.Cos((math.Pi/2.0)*(x/3.0))
}
return 0
},
}
}
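Everything above is driven through the exported ResampleFilter values, and the struct itself is public, so callers can also supply their own kernel. A short usage sketch built only from identifiers that appear in this diff (imaging.New is defined in tools.go further down); the sizes are arbitrary:

```go
package main

import (
	"fmt"
	"image/color"
	"math"

	"github.com/disintegration/imaging"
)

func main() {
	// A solid 800x600 test image (imaging.New is shown in tools.go below).
	src := imaging.New(800, 600, color.NRGBA{R: 40, G: 120, B: 200, A: 255})

	// Built-in filter: scale down to fit inside 512x512, keeping the aspect ratio.
	thumb := imaging.Fit(src, 512, 512, imaging.Lanczos)

	// Custom filter: any Support/Kernel pair works. This triangle (tent)
	// kernel has the same shape as imaging.Linear.
	tent := imaging.ResampleFilter{
		Support: 1.0,
		Kernel: func(x float64) float64 {
			if x = math.Abs(x); x < 1 {
				return 1 - x
			}
			return 0
		},
	}
	small := imaging.Resize(src, 320, 0, tent) // height 0 preserves the aspect ratio

	fmt.Println(thumb.Bounds().Dx(), thumb.Bounds().Dy()) // 512 384
	fmt.Println(small.Bounds().Dx(), small.Bounds().Dy()) // 320 240
}
```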

285
vendor/github.com/disintegration/imaging/scanner.go generated vendored Normal file
View File

@ -0,0 +1,285 @@
package imaging
import (
"image"
"image/color"
)
type scanner struct {
image image.Image
w, h int
palette []color.NRGBA
}
func newScanner(img image.Image) *scanner {
s := &scanner{
image: img,
w: img.Bounds().Dx(),
h: img.Bounds().Dy(),
}
if img, ok := img.(*image.Paletted); ok {
s.palette = make([]color.NRGBA, len(img.Palette))
for i := 0; i < len(img.Palette); i++ {
s.palette[i] = color.NRGBAModel.Convert(img.Palette[i]).(color.NRGBA)
}
}
return s
}
// scan scans the given rectangular region of the image into dst.
func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
switch img := s.image.(type) {
case *image.NRGBA:
size := (x2 - x1) * 4
j := 0
i := y1*img.Stride + x1*4
if size == 4 {
for y := y1; y < y2; y++ {
d := dst[j : j+4 : j+4]
s := img.Pix[i : i+4 : i+4]
d[0] = s[0]
d[1] = s[1]
d[2] = s[2]
d[3] = s[3]
j += size
i += img.Stride
}
} else {
for y := y1; y < y2; y++ {
copy(dst[j:j+size], img.Pix[i:i+size])
j += size
i += img.Stride
}
}
case *image.NRGBA64:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1*8
for x := x1; x < x2; x++ {
s := img.Pix[i : i+8 : i+8]
d := dst[j : j+4 : j+4]
d[0] = s[0]
d[1] = s[2]
d[2] = s[4]
d[3] = s[6]
j += 4
i += 8
}
}
case *image.RGBA:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1*4
for x := x1; x < x2; x++ {
d := dst[j : j+4 : j+4]
a := img.Pix[i+3]
switch a {
case 0:
d[0] = 0
d[1] = 0
d[2] = 0
d[3] = a
case 0xff:
s := img.Pix[i : i+4 : i+4]
d[0] = s[0]
d[1] = s[1]
d[2] = s[2]
d[3] = a
default:
s := img.Pix[i : i+4 : i+4]
r16 := uint16(s[0])
g16 := uint16(s[1])
b16 := uint16(s[2])
a16 := uint16(a)
d[0] = uint8(r16 * 0xff / a16)
d[1] = uint8(g16 * 0xff / a16)
d[2] = uint8(b16 * 0xff / a16)
d[3] = a
}
j += 4
i += 4
}
}
case *image.RGBA64:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1*8
for x := x1; x < x2; x++ {
s := img.Pix[i : i+8 : i+8]
d := dst[j : j+4 : j+4]
a := s[6]
switch a {
case 0:
d[0] = 0
d[1] = 0
d[2] = 0
case 0xff:
d[0] = s[0]
d[1] = s[2]
d[2] = s[4]
default:
r32 := uint32(s[0])<<8 | uint32(s[1])
g32 := uint32(s[2])<<8 | uint32(s[3])
b32 := uint32(s[4])<<8 | uint32(s[5])
a32 := uint32(s[6])<<8 | uint32(s[7])
d[0] = uint8((r32 * 0xffff / a32) >> 8)
d[1] = uint8((g32 * 0xffff / a32) >> 8)
d[2] = uint8((b32 * 0xffff / a32) >> 8)
}
d[3] = a
j += 4
i += 8
}
}
case *image.Gray:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1
for x := x1; x < x2; x++ {
c := img.Pix[i]
d := dst[j : j+4 : j+4]
d[0] = c
d[1] = c
d[2] = c
d[3] = 0xff
j += 4
i++
}
}
case *image.Gray16:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1*2
for x := x1; x < x2; x++ {
c := img.Pix[i]
d := dst[j : j+4 : j+4]
d[0] = c
d[1] = c
d[2] = c
d[3] = 0xff
j += 4
i += 2
}
}
case *image.YCbCr:
j := 0
x1 += img.Rect.Min.X
x2 += img.Rect.Min.X
y1 += img.Rect.Min.Y
y2 += img.Rect.Min.Y
hy := img.Rect.Min.Y / 2
hx := img.Rect.Min.X / 2
for y := y1; y < y2; y++ {
iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X)
var yBase int
switch img.SubsampleRatio {
case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio422:
yBase = (y - img.Rect.Min.Y) * img.CStride
case image.YCbCrSubsampleRatio420, image.YCbCrSubsampleRatio440:
yBase = (y/2 - hy) * img.CStride
}
for x := x1; x < x2; x++ {
var ic int
switch img.SubsampleRatio {
case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio440:
ic = yBase + (x - img.Rect.Min.X)
case image.YCbCrSubsampleRatio422, image.YCbCrSubsampleRatio420:
ic = yBase + (x/2 - hx)
default:
ic = img.COffset(x, y)
}
yy1 := int32(img.Y[iy]) * 0x10101
cb1 := int32(img.Cb[ic]) - 128
cr1 := int32(img.Cr[ic]) - 128
r := yy1 + 91881*cr1
if uint32(r)&0xff000000 == 0 {
r >>= 16
} else {
r = ^(r >> 31)
}
g := yy1 - 22554*cb1 - 46802*cr1
if uint32(g)&0xff000000 == 0 {
g >>= 16
} else {
g = ^(g >> 31)
}
b := yy1 + 116130*cb1
if uint32(b)&0xff000000 == 0 {
b >>= 16
} else {
b = ^(b >> 31)
}
d := dst[j : j+4 : j+4]
d[0] = uint8(r)
d[1] = uint8(g)
d[2] = uint8(b)
d[3] = 0xff
iy++
j += 4
}
}
case *image.Paletted:
j := 0
for y := y1; y < y2; y++ {
i := y*img.Stride + x1
for x := x1; x < x2; x++ {
c := s.palette[img.Pix[i]]
d := dst[j : j+4 : j+4]
d[0] = c.R
d[1] = c.G
d[2] = c.B
d[3] = c.A
j += 4
i++
}
}
default:
j := 0
b := s.image.Bounds()
x1 += b.Min.X
x2 += b.Min.X
y1 += b.Min.Y
y2 += b.Min.Y
for y := y1; y < y2; y++ {
for x := x1; x < x2; x++ {
r16, g16, b16, a16 := s.image.At(x, y).RGBA()
d := dst[j : j+4 : j+4]
switch a16 {
case 0xffff:
d[0] = uint8(r16 >> 8)
d[1] = uint8(g16 >> 8)
d[2] = uint8(b16 >> 8)
d[3] = 0xff
case 0:
d[0] = 0
d[1] = 0
d[2] = 0
d[3] = 0
default:
d[0] = uint8(((r16 * 0xffff) / a16) >> 8)
d[1] = uint8(((g16 * 0xffff) / a16) >> 8)
d[2] = uint8(((b16 * 0xffff) / a16) >> 8)
d[3] = uint8(a16 >> 8)
}
j += 4
}
}
}
}
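The scanner is the piece that normalizes every supported source type into straight-alpha (non-premultiplied) 8-bit RGBA before the filters run; for *image.RGBA input it divides each channel by alpha (d[0] = r*0xff/a above). A tiny sketch of that behavior through the exported Clone, which reads via this scanner (the single test pixel is arbitrary):

```go
package main

import (
	"fmt"
	"image"
	"image/color"

	"github.com/disintegration/imaging"
)

func main() {
	// One premultiplied pixel: alpha 128, premultiplied red 64.
	src := image.NewRGBA(image.Rect(0, 0, 1, 1))
	src.SetRGBA(0, 0, color.RGBA{R: 64, A: 128})

	// Clone goes through the scanner's *image.RGBA case, which un-premultiplies:
	// 64 * 255 / 128 = 127 (integer division).
	dst := imaging.Clone(src)
	fmt.Println(dst.NRGBAAt(0, 0)) // {127 0 0 128}
}
```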

249
vendor/github.com/disintegration/imaging/tools.go generated vendored Normal file
View File

@ -0,0 +1,249 @@
package imaging
import (
"bytes"
"image"
"image/color"
"math"
)
// New creates a new image with the specified width and height, and fills it with the specified color.
func New(width, height int, fillColor color.Color) *image.NRGBA {
if width <= 0 || height <= 0 {
return &image.NRGBA{}
}
c := color.NRGBAModel.Convert(fillColor).(color.NRGBA)
if (c == color.NRGBA{0, 0, 0, 0}) {
return image.NewNRGBA(image.Rect(0, 0, width, height))
}
return &image.NRGBA{
Pix: bytes.Repeat([]byte{c.R, c.G, c.B, c.A}, width*height),
Stride: 4 * width,
Rect: image.Rect(0, 0, width, height),
}
}
// Clone returns a copy of the given image.
func Clone(img image.Image) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
size := src.w * 4
parallel(0, src.h, func(ys <-chan int) {
for y := range ys {
i := y * dst.Stride
src.scan(0, y, src.w, y+1, dst.Pix[i:i+size])
}
})
return dst
}
// Anchor is the anchor point for image alignment.
type Anchor int
// Anchor point positions.
const (
Center Anchor = iota
TopLeft
Top
TopRight
Left
Right
BottomLeft
Bottom
BottomRight
)
func anchorPt(b image.Rectangle, w, h int, anchor Anchor) image.Point {
var x, y int
switch anchor {
case TopLeft:
x = b.Min.X
y = b.Min.Y
case Top:
x = b.Min.X + (b.Dx()-w)/2
y = b.Min.Y
case TopRight:
x = b.Max.X - w
y = b.Min.Y
case Left:
x = b.Min.X
y = b.Min.Y + (b.Dy()-h)/2
case Right:
x = b.Max.X - w
y = b.Min.Y + (b.Dy()-h)/2
case BottomLeft:
x = b.Min.X
y = b.Max.Y - h
case Bottom:
x = b.Min.X + (b.Dx()-w)/2
y = b.Max.Y - h
case BottomRight:
x = b.Max.X - w
y = b.Max.Y - h
default:
x = b.Min.X + (b.Dx()-w)/2
y = b.Min.Y + (b.Dy()-h)/2
}
return image.Pt(x, y)
}
// Crop cuts out a rectangular region with the specified bounds
// from the image and returns the cropped image.
func Crop(img image.Image, rect image.Rectangle) *image.NRGBA {
r := rect.Intersect(img.Bounds()).Sub(img.Bounds().Min)
if r.Empty() {
return &image.NRGBA{}
}
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, r.Dx(), r.Dy()))
rowSize := r.Dx() * 4
parallel(r.Min.Y, r.Max.Y, func(ys <-chan int) {
for y := range ys {
i := (y - r.Min.Y) * dst.Stride
src.scan(r.Min.X, y, r.Max.X, y+1, dst.Pix[i:i+rowSize])
}
})
return dst
}
// CropAnchor cuts out a rectangular region with the specified size
// from the image using the specified anchor point and returns the cropped image.
func CropAnchor(img image.Image, width, height int, anchor Anchor) *image.NRGBA {
srcBounds := img.Bounds()
pt := anchorPt(srcBounds, width, height, anchor)
r := image.Rect(0, 0, width, height).Add(pt)
b := srcBounds.Intersect(r)
return Crop(img, b)
}
// CropCenter cuts out a rectangular region with the specified size
// from the center of the image and returns the cropped image.
func CropCenter(img image.Image, width, height int) *image.NRGBA {
return CropAnchor(img, width, height, Center)
}
// Paste pastes the img image to the background image at the specified position and returns the combined image.
func Paste(background, img image.Image, pos image.Point) *image.NRGBA {
dst := Clone(background)
pos = pos.Sub(background.Bounds().Min)
pasteRect := image.Rectangle{Min: pos, Max: pos.Add(img.Bounds().Size())}
interRect := pasteRect.Intersect(dst.Bounds())
if interRect.Empty() {
return dst
}
src := newScanner(img)
parallel(interRect.Min.Y, interRect.Max.Y, func(ys <-chan int) {
for y := range ys {
x1 := interRect.Min.X - pasteRect.Min.X
x2 := interRect.Max.X - pasteRect.Min.X
y1 := y - pasteRect.Min.Y
y2 := y1 + 1
i1 := y*dst.Stride + interRect.Min.X*4
i2 := i1 + interRect.Dx()*4
src.scan(x1, y1, x2, y2, dst.Pix[i1:i2])
}
})
return dst
}
// PasteCenter pastes the img image to the center of the background image and returns the combined image.
func PasteCenter(background, img image.Image) *image.NRGBA {
bgBounds := background.Bounds()
bgW := bgBounds.Dx()
bgH := bgBounds.Dy()
bgMinX := bgBounds.Min.X
bgMinY := bgBounds.Min.Y
centerX := bgMinX + bgW/2
centerY := bgMinY + bgH/2
x0 := centerX - img.Bounds().Dx()/2
y0 := centerY - img.Bounds().Dy()/2
return Paste(background, img, image.Pt(x0, y0))
}
// Overlay draws the img image over the background image at the given position
// and returns the combined image. The opacity parameter is the opacity of the img
// image layer used to compose the images; it must be between 0.0 and 1.0.
//
// Examples:
//
// // Draw spriteImage over backgroundImage at the given position (x=50, y=50).
// dstImage := imaging.Overlay(backgroundImage, spriteImage, image.Pt(50, 50), 1.0)
//
// // Blend two opaque images of the same size.
// dstImage := imaging.Overlay(imageOne, imageTwo, image.Pt(0, 0), 0.5)
//
func Overlay(background, img image.Image, pos image.Point, opacity float64) *image.NRGBA {
opacity = math.Min(math.Max(opacity, 0.0), 1.0) // Ensure 0.0 <= opacity <= 1.0.
dst := Clone(background)
pos = pos.Sub(background.Bounds().Min)
pasteRect := image.Rectangle{Min: pos, Max: pos.Add(img.Bounds().Size())}
interRect := pasteRect.Intersect(dst.Bounds())
if interRect.Empty() {
return dst
}
src := newScanner(img)
parallel(interRect.Min.Y, interRect.Max.Y, func(ys <-chan int) {
scanLine := make([]uint8, interRect.Dx()*4)
for y := range ys {
x1 := interRect.Min.X - pasteRect.Min.X
x2 := interRect.Max.X - pasteRect.Min.X
y1 := y - pasteRect.Min.Y
y2 := y1 + 1
src.scan(x1, y1, x2, y2, scanLine)
i := y*dst.Stride + interRect.Min.X*4
j := 0
for x := interRect.Min.X; x < interRect.Max.X; x++ {
d := dst.Pix[i : i+4 : i+4]
r1 := float64(d[0])
g1 := float64(d[1])
b1 := float64(d[2])
a1 := float64(d[3])
s := scanLine[j : j+4 : j+4]
r2 := float64(s[0])
g2 := float64(s[1])
b2 := float64(s[2])
a2 := float64(s[3])
coef2 := opacity * a2 / 255
coef1 := (1 - coef2) * a1 / 255
coefSum := coef1 + coef2
coef1 /= coefSum
coef2 /= coefSum
d[0] = uint8(r1*coef1 + r2*coef2)
d[1] = uint8(g1*coef1 + g2*coef2)
d[2] = uint8(b1*coef1 + b2*coef2)
d[3] = uint8(math.Min(a1+a2*opacity*(255-a1)/255, 255))
i += 4
j += 4
}
}
})
return dst
}
// OverlayCenter overlays the img image at the center of the background image and
// returns the combined image. The opacity parameter is the opacity of the img
// image layer used to compose the images; it must be between 0.0 and 1.0.
func OverlayCenter(background, img image.Image, opacity float64) *image.NRGBA {
bgBounds := background.Bounds()
bgW := bgBounds.Dx()
bgH := bgBounds.Dy()
bgMinX := bgBounds.Min.X
bgMinY := bgBounds.Min.Y
centerX := bgMinX + bgW/2
centerY := bgMinY + bgH/2
x0 := centerX - img.Bounds().Dx()/2
y0 := centerY - img.Bounds().Dy()/2
return Overlay(background, img, image.Point{x0, y0}, opacity)
}
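These helpers compose naturally: New builds a canvas, CropAnchor cuts a region relative to one of the nine anchors, and Paste/Overlay put images back together. A small sketch using only the exported functions above (dimensions and colors are arbitrary):

```go
package main

import (
	"fmt"
	"image"
	"image/color"

	"github.com/disintegration/imaging"
)

func main() {
	base := imaging.New(300, 200, color.NRGBA{R: 255, A: 255})
	badge := imaging.New(60, 60, color.NRGBA{B: 255, A: 255})

	// Cut the 100x100 region anchored at the bottom-right corner.
	corner := imaging.CropAnchor(base, 100, 100, imaging.BottomRight)
	fmt.Println(corner.Bounds()) // (0,0)-(100,100)

	// Paste the badge at the top-left, then blend a second copy at 50% opacity.
	out := imaging.Paste(base, badge, image.Pt(0, 0))
	out = imaging.Overlay(out, badge, image.Pt(120, 70), 0.5)
	fmt.Println(out.Bounds().Dx(), out.Bounds().Dy()) // 300 200
}
```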

268
vendor/github.com/disintegration/imaging/transform.go generated vendored Normal file
View File

@ -0,0 +1,268 @@
package imaging
import (
"image"
"image/color"
"math"
)
// FlipH flips the image horizontally (from left to right) and returns the transformed image.
func FlipH(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.w
dstH := src.h
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcY := dstY
src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
reverse(dst.Pix[i : i+rowSize])
}
})
return dst
}
// FlipV flips the image vertically (from top to bottom) and returns the transformed image.
func FlipV(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.w
dstH := src.h
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcY := dstH - dstY - 1
src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
}
})
return dst
}
// Transpose flips the image horizontally and rotates 90 degrees counter-clockwise.
func Transpose(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcX := dstY
src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
}
})
return dst
}
// Transverse flips the image vertically and rotates 90 degrees counter-clockwise.
func Transverse(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcX := dstH - dstY - 1
src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
reverse(dst.Pix[i : i+rowSize])
}
})
return dst
}
// Rotate90 rotates the image 90 degrees counter-clockwise and returns the transformed image.
func Rotate90(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcX := dstH - dstY - 1
src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
}
})
return dst
}
// Rotate180 rotates the image 180 degrees counter-clockwise and returns the transformed image.
func Rotate180(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.w
dstH := src.h
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcY := dstH - dstY - 1
src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
reverse(dst.Pix[i : i+rowSize])
}
})
return dst
}
// Rotate270 rotates the image 270 degrees counter-clockwise and returns the transformed image.
func Rotate270(img image.Image) *image.NRGBA {
src := newScanner(img)
dstW := src.h
dstH := src.w
rowSize := dstW * 4
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
i := dstY * dst.Stride
srcX := dstY
src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
reverse(dst.Pix[i : i+rowSize])
}
})
return dst
}
// Rotate rotates an image by the given angle counter-clockwise.
// The angle parameter is the rotation angle in degrees.
// The bgColor parameter specifies the color of the uncovered zone after the rotation.
func Rotate(img image.Image, angle float64, bgColor color.Color) *image.NRGBA {
angle = angle - math.Floor(angle/360)*360
switch angle {
case 0:
return Clone(img)
case 90:
return Rotate90(img)
case 180:
return Rotate180(img)
case 270:
return Rotate270(img)
}
src := toNRGBA(img)
srcW := src.Bounds().Max.X
srcH := src.Bounds().Max.Y
dstW, dstH := rotatedSize(srcW, srcH, angle)
dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
if dstW <= 0 || dstH <= 0 {
return dst
}
srcXOff := float64(srcW)/2 - 0.5
srcYOff := float64(srcH)/2 - 0.5
dstXOff := float64(dstW)/2 - 0.5
dstYOff := float64(dstH)/2 - 0.5
bgColorNRGBA := color.NRGBAModel.Convert(bgColor).(color.NRGBA)
sin, cos := math.Sincos(math.Pi * angle / 180)
parallel(0, dstH, func(ys <-chan int) {
for dstY := range ys {
for dstX := 0; dstX < dstW; dstX++ {
xf, yf := rotatePoint(float64(dstX)-dstXOff, float64(dstY)-dstYOff, sin, cos)
xf, yf = xf+srcXOff, yf+srcYOff
interpolatePoint(dst, dstX, dstY, src, xf, yf, bgColorNRGBA)
}
}
})
return dst
}
func rotatePoint(x, y, sin, cos float64) (float64, float64) {
return x*cos - y*sin, x*sin + y*cos
}
func rotatedSize(w, h int, angle float64) (int, int) {
if w <= 0 || h <= 0 {
return 0, 0
}
sin, cos := math.Sincos(math.Pi * angle / 180)
x1, y1 := rotatePoint(float64(w-1), 0, sin, cos)
x2, y2 := rotatePoint(float64(w-1), float64(h-1), sin, cos)
x3, y3 := rotatePoint(0, float64(h-1), sin, cos)
minx := math.Min(x1, math.Min(x2, math.Min(x3, 0)))
maxx := math.Max(x1, math.Max(x2, math.Max(x3, 0)))
miny := math.Min(y1, math.Min(y2, math.Min(y3, 0)))
maxy := math.Max(y1, math.Max(y2, math.Max(y3, 0)))
neww := maxx - minx + 1
if neww-math.Floor(neww) > 0.1 {
neww++
}
newh := maxy - miny + 1
if newh-math.Floor(newh) > 0.1 {
newh++
}
return int(neww), int(newh)
}
func interpolatePoint(dst *image.NRGBA, dstX, dstY int, src *image.NRGBA, xf, yf float64, bgColor color.NRGBA) {
j := dstY*dst.Stride + dstX*4
d := dst.Pix[j : j+4 : j+4]
x0 := int(math.Floor(xf))
y0 := int(math.Floor(yf))
bounds := src.Bounds()
if !image.Pt(x0, y0).In(image.Rect(bounds.Min.X-1, bounds.Min.Y-1, bounds.Max.X, bounds.Max.Y)) {
d[0] = bgColor.R
d[1] = bgColor.G
d[2] = bgColor.B
d[3] = bgColor.A
return
}
xq := xf - float64(x0)
yq := yf - float64(y0)
points := [4]image.Point{
{x0, y0},
{x0 + 1, y0},
{x0, y0 + 1},
{x0 + 1, y0 + 1},
}
weights := [4]float64{
(1 - xq) * (1 - yq),
xq * (1 - yq),
(1 - xq) * yq,
xq * yq,
}
var r, g, b, a float64
for i := 0; i < 4; i++ {
p := points[i]
w := weights[i]
if p.In(bounds) {
i := p.Y*src.Stride + p.X*4
s := src.Pix[i : i+4 : i+4]
wa := float64(s[3]) * w
r += float64(s[0]) * wa
g += float64(s[1]) * wa
b += float64(s[2]) * wa
a += wa
} else {
wa := float64(bgColor.A) * w
r += float64(bgColor.R) * wa
g += float64(bgColor.G) * wa
b += float64(bgColor.B) * wa
a += wa
}
}
if a != 0 {
aInv := 1 / a
d[0] = clamp(r * aInv)
d[1] = clamp(g * aInv)
d[2] = clamp(b * aInv)
d[3] = clamp(a)
}
}
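The right-angle rotations and flips above are exact pixel shuffles (they are what fixOrientation in exif.go dispatches to), while Rotate handles arbitrary angles by growing the canvas and resampling bilinearly, filling the uncovered corners with bgColor. A brief sketch of the difference (input size is arbitrary):

```go
package main

import (
	"fmt"
	"image/color"

	"github.com/disintegration/imaging"
)

func main() {
	src := imaging.New(100, 100, color.NRGBA{G: 255, A: 255})

	// Right-angle rotation: exact, same pixel count, just transposed dimensions.
	fmt.Println(imaging.Rotate90(src).Bounds().Dx()) // 100

	// Arbitrary angle: the canvas grows to hold the rotated image (roughly
	// 141x141 for a 100x100 source at 45 degrees) and the corners get bgColor.
	rotated := imaging.Rotate(src, 45, color.NRGBA{}) // transparent background
	fmt.Println(rotated.Bounds())
}
```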

167
vendor/github.com/disintegration/imaging/utils.go generated vendored Normal file
View File

@ -0,0 +1,167 @@
package imaging
import (
"image"
"math"
"runtime"
"sync"
)
// parallel processes the data in separate goroutines.
func parallel(start, stop int, fn func(<-chan int)) {
count := stop - start
if count < 1 {
return
}
procs := runtime.GOMAXPROCS(0)
if procs > count {
procs = count
}
c := make(chan int, count)
for i := start; i < stop; i++ {
c <- i
}
close(c)
var wg sync.WaitGroup
for i := 0; i < procs; i++ {
wg.Add(1)
go func() {
defer wg.Done()
fn(c)
}()
}
wg.Wait()
}
// absint returns the absolute value of i.
func absint(i int) int {
if i < 0 {
return -i
}
return i
}
// clamp rounds and clamps float64 value to fit into uint8.
func clamp(x float64) uint8 {
v := int64(x + 0.5)
if v > 255 {
return 255
}
if v > 0 {
return uint8(v)
}
return 0
}
func reverse(pix []uint8) {
if len(pix) <= 4 {
return
}
i := 0
j := len(pix) - 4
for i < j {
pi := pix[i : i+4 : i+4]
pj := pix[j : j+4 : j+4]
pi[0], pj[0] = pj[0], pi[0]
pi[1], pj[1] = pj[1], pi[1]
pi[2], pj[2] = pj[2], pi[2]
pi[3], pj[3] = pj[3], pi[3]
i += 4
j -= 4
}
}
func toNRGBA(img image.Image) *image.NRGBA {
if img, ok := img.(*image.NRGBA); ok {
return &image.NRGBA{
Pix: img.Pix,
Stride: img.Stride,
Rect: img.Rect.Sub(img.Rect.Min),
}
}
return Clone(img)
}
// rgbToHSL converts a color from RGB to HSL.
func rgbToHSL(r, g, b uint8) (float64, float64, float64) {
rr := float64(r) / 255
gg := float64(g) / 255
bb := float64(b) / 255
max := math.Max(rr, math.Max(gg, bb))
min := math.Min(rr, math.Min(gg, bb))
l := (max + min) / 2
if max == min {
return 0, 0, l
}
var h, s float64
d := max - min
if l > 0.5 {
s = d / (2 - max - min)
} else {
s = d / (max + min)
}
switch max {
case rr:
h = (gg - bb) / d
if g < b {
h += 6
}
case gg:
h = (bb-rr)/d + 2
case bb:
h = (rr-gg)/d + 4
}
h /= 6
return h, s, l
}
// hslToRGB converts a color from HSL to RGB.
func hslToRGB(h, s, l float64) (uint8, uint8, uint8) {
var r, g, b float64
if s == 0 {
v := clamp(l * 255)
return v, v, v
}
var q float64
if l < 0.5 {
q = l * (1 + s)
} else {
q = l + s - l*s
}
p := 2*l - q
r = hueToRGB(p, q, h+1/3.0)
g = hueToRGB(p, q, h)
b = hueToRGB(p, q, h-1/3.0)
return clamp(r * 255), clamp(g * 255), clamp(b * 255)
}
func hueToRGB(p, q, t float64) float64 {
if t < 0 {
t++
}
if t > 1 {
t--
}
if t < 1/6.0 {
return p + (q-p)*6*t
}
if t < 1/2.0 {
return q
}
if t < 2/3.0 {
return p + (q-p)*(2/3.0-t)*6
}
return p
}
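parallel is the package's entire concurrency model: every row (or column) index goes into a buffered channel and up to GOMAXPROCS goroutines drain it, each writing to a disjoint slice of the destination pixels so no locking is needed. Since parallel is unexported, the sketch below re-implements the same pattern for illustration; the squaring workload is just a stand-in for per-row pixel work:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// rowsInParallel mirrors the unexported parallel() above: indices are queued
// in a buffered channel and up to GOMAXPROCS workers drain it.
func rowsInParallel(start, stop int, fn func(<-chan int)) {
	count := stop - start
	if count < 1 {
		return
	}
	procs := runtime.GOMAXPROCS(0)
	if procs > count {
		procs = count
	}
	c := make(chan int, count)
	for i := start; i < stop; i++ {
		c <- i
	}
	close(c)
	var wg sync.WaitGroup
	for i := 0; i < procs; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fn(c)
		}()
	}
	wg.Wait()
}

func main() {
	sums := make([]int, 8) // one slot per "row", so workers never overlap
	rowsInParallel(0, 8, func(rows <-chan int) {
		for y := range rows {
			sums[y] = y * y
		}
	})
	fmt.Println(sums) // [0 1 4 9 16 25 36 49]
}
```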

7
vendor/github.com/nfnt/resize/.travis.yml generated vendored
View File

@ -1,7 +0,0 @@
language: go
go:
- "1.x"
- "1.1"
- "1.4"
- "1.10"

13
vendor/github.com/nfnt/resize/LICENSE generated vendored
View File

@ -1,13 +0,0 @@
Copyright (c) 2012, Jan Schlicht <jan.schlicht@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.

151
vendor/github.com/nfnt/resize/README.md generated vendored
View File

@ -1,151 +0,0 @@
# This package is no longer being updated! Please look for alternatives if that bothers you.
Resize
======
Image resizing for the [Go programming language](http://golang.org) with common interpolation methods.
[![Build Status](https://travis-ci.org/nfnt/resize.svg)](https://travis-ci.org/nfnt/resize)
Installation
------------
```bash
$ go get github.com/nfnt/resize
```
It's that easy!
Usage
-----
This package needs at least Go 1.1. Import package with
```go
import "github.com/nfnt/resize"
```
The resize package provides 2 functions:
* `resize.Resize` creates a scaled image with new dimensions (`width`, `height`) using the interpolation function `interp`.
If either `width` or `height` is set to 0, it will be set to an aspect ratio preserving value.
* `resize.Thumbnail` downscales an image preserving its aspect ratio to the maximum dimensions (`maxWidth`, `maxHeight`).
It will return the original image if original sizes are smaller than the provided dimensions.
```go
resize.Resize(width, height uint, img image.Image, interp resize.InterpolationFunction) image.Image
resize.Thumbnail(maxWidth, maxHeight uint, img image.Image, interp resize.InterpolationFunction) image.Image
```
The provided interpolation functions are (from fast to slow execution time)
- `NearestNeighbor`: [Nearest-neighbor interpolation](http://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)
- `Bilinear`: [Bilinear interpolation](http://en.wikipedia.org/wiki/Bilinear_interpolation)
- `Bicubic`: [Bicubic interpolation](http://en.wikipedia.org/wiki/Bicubic_interpolation)
- `MitchellNetravali`: [Mitchell-Netravali interpolation](http://dl.acm.org/citation.cfm?id=378514)
- `Lanczos2`: [Lanczos resampling](http://en.wikipedia.org/wiki/Lanczos_resampling) with a=2
- `Lanczos3`: [Lanczos resampling](http://en.wikipedia.org/wiki/Lanczos_resampling) with a=3
Which of these methods gives the best results depends on your use case.
Sample usage:
```go
package main
import (
"github.com/nfnt/resize"
"image/jpeg"
"log"
"os"
)
func main() {
// open "test.jpg"
file, err := os.Open("test.jpg")
if err != nil {
log.Fatal(err)
}
// decode jpeg into image.Image
img, err := jpeg.Decode(file)
if err != nil {
log.Fatal(err)
}
file.Close()
// resize to width 1000 using Lanczos resampling
// and preserve aspect ratio
m := resize.Resize(1000, 0, img, resize.Lanczos3)
out, err := os.Create("test_resized.jpg")
if err != nil {
log.Fatal(err)
}
defer out.Close()
// write new image to file
jpeg.Encode(out, m, nil)
}
```
Caveats
-------
* Optimized access routines are used for `image.RGBA`, `image.NRGBA`, `image.RGBA64`, `image.NRGBA64`, `image.YCbCr`, `image.Gray`, and `image.Gray16` types. All other image types are accessed in a generic way that will result in slow processing speed.
* JPEG images are stored in `image.YCbCr`. This image format stores data in a way that will decrease processing speed. A resize may be up to 2 times slower than with `image.RGBA`.
Downsizing Samples
-------
Downsizing is not as simple as it might look like. Images have to be filtered before they are scaled down, otherwise aliasing might occur.
Filtering is highly subjective: Applying too much will blur the whole image, too little will make aliasing become apparent.
Resize tries to provide sane defaults that should suffice in most cases.
### Artificial sample
Original image
![Rings](http://nfnt.github.com/img/rings_lg_orig.png)
<table>
<tr>
<th><img src="http://nfnt.github.com/img/rings_300_NearestNeighbor.png" /><br>Nearest-Neighbor</th>
<th><img src="http://nfnt.github.com/img/rings_300_Bilinear.png" /><br>Bilinear</th>
</tr>
<tr>
<th><img src="http://nfnt.github.com/img/rings_300_Bicubic.png" /><br>Bicubic</th>
<th><img src="http://nfnt.github.com/img/rings_300_MitchellNetravali.png" /><br>Mitchell-Netravali</th>
</tr>
<tr>
<th><img src="http://nfnt.github.com/img/rings_300_Lanczos2.png" /><br>Lanczos2</th>
<th><img src="http://nfnt.github.com/img/rings_300_Lanczos3.png" /><br>Lanczos3</th>
</tr>
</table>
### Real-Life sample
Original image
![Original](http://nfnt.github.com/img/IMG_3694_720.jpg)
<table>
<tr>
<th><img src="http://nfnt.github.com/img/IMG_3694_300_NearestNeighbor.png" /><br>Nearest-Neighbor</th>
<th><img src="http://nfnt.github.com/img/IMG_3694_300_Bilinear.png" /><br>Bilinear</th>
</tr>
<tr>
<th><img src="http://nfnt.github.com/img/IMG_3694_300_Bicubic.png" /><br>Bicubic</th>
<th><img src="http://nfnt.github.com/img/IMG_3694_300_MitchellNetravali.png" /><br>Mitchell-Netravali</th>
</tr>
<tr>
<th><img src="http://nfnt.github.com/img/IMG_3694_300_Lanczos2.png" /><br>Lanczos2</th>
<th><img src="http://nfnt.github.com/img/IMG_3694_300_Lanczos3.png" /><br>Lanczos3</th>
</tr>
</table>
License
-------
Copyright (c) 2012 Jan Schlicht <janschlicht@gmail.com>
Resize is released under a MIT style license.

438
vendor/github.com/nfnt/resize/converter.go generated vendored
View File

@ -1,438 +0,0 @@
/*
Copyright (c) 2012, Jan Schlicht <jan.schlicht@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
*/
package resize
import "image"
// Keep value in [0,255] range.
func clampUint8(in int32) uint8 {
// casting a negative int to an uint will result in an overflown
// large uint. this behavior will be exploited here and in other functions
// to achieve a higher performance.
if uint32(in) < 256 {
return uint8(in)
}
if in > 255 {
return 255
}
return 0
}
// Keep value in [0,65535] range.
func clampUint16(in int64) uint16 {
if uint64(in) < 65536 {
return uint16(in)
}
if in > 65535 {
return 65535
}
return 0
}
func resizeGeneric(in image.Image, out *image.RGBA64, scale float64, coeffs []int32, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var rgba [4]int64
var sum int64
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
coeff := coeffs[ci+i]
if coeff != 0 {
xi := start + i
switch {
case xi < 0:
xi = 0
case xi >= maxX:
xi = maxX
}
r, g, b, a := in.At(xi+in.Bounds().Min.X, x+in.Bounds().Min.Y).RGBA()
rgba[0] += int64(coeff) * int64(r)
rgba[1] += int64(coeff) * int64(g)
rgba[2] += int64(coeff) * int64(b)
rgba[3] += int64(coeff) * int64(a)
sum += int64(coeff)
}
}
offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
value := clampUint16(rgba[0] / sum)
out.Pix[offset+0] = uint8(value >> 8)
out.Pix[offset+1] = uint8(value)
value = clampUint16(rgba[1] / sum)
out.Pix[offset+2] = uint8(value >> 8)
out.Pix[offset+3] = uint8(value)
value = clampUint16(rgba[2] / sum)
out.Pix[offset+4] = uint8(value >> 8)
out.Pix[offset+5] = uint8(value)
value = clampUint16(rgba[3] / sum)
out.Pix[offset+6] = uint8(value >> 8)
out.Pix[offset+7] = uint8(value)
}
}
}
func resizeRGBA(in *image.RGBA, out *image.RGBA, scale float64, coeffs []int16, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var rgba [4]int32
var sum int32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
coeff := coeffs[ci+i]
if coeff != 0 {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 4
case xi >= maxX:
xi = 4 * maxX
default:
xi = 0
}
rgba[0] += int32(coeff) * int32(row[xi+0])
rgba[1] += int32(coeff) * int32(row[xi+1])
rgba[2] += int32(coeff) * int32(row[xi+2])
rgba[3] += int32(coeff) * int32(row[xi+3])
sum += int32(coeff)
}
}
xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4
out.Pix[xo+0] = clampUint8(rgba[0] / sum)
out.Pix[xo+1] = clampUint8(rgba[1] / sum)
out.Pix[xo+2] = clampUint8(rgba[2] / sum)
out.Pix[xo+3] = clampUint8(rgba[3] / sum)
}
}
}
func resizeNRGBA(in *image.NRGBA, out *image.RGBA, scale float64, coeffs []int16, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var rgba [4]int32
var sum int32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
coeff := coeffs[ci+i]
if coeff != 0 {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 4
case xi >= maxX:
xi = 4 * maxX
default:
xi = 0
}
// Forward alpha-premultiplication
a := int32(row[xi+3])
r := int32(row[xi+0]) * a
r /= 0xff
g := int32(row[xi+1]) * a
g /= 0xff
b := int32(row[xi+2]) * a
b /= 0xff
rgba[0] += int32(coeff) * r
rgba[1] += int32(coeff) * g
rgba[2] += int32(coeff) * b
rgba[3] += int32(coeff) * a
sum += int32(coeff)
}
}
xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4
out.Pix[xo+0] = clampUint8(rgba[0] / sum)
out.Pix[xo+1] = clampUint8(rgba[1] / sum)
out.Pix[xo+2] = clampUint8(rgba[2] / sum)
out.Pix[xo+3] = clampUint8(rgba[3] / sum)
}
}
}
func resizeRGBA64(in *image.RGBA64, out *image.RGBA64, scale float64, coeffs []int32, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var rgba [4]int64
var sum int64
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
coeff := coeffs[ci+i]
if coeff != 0 {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 8
case xi >= maxX:
xi = 8 * maxX
default:
xi = 0
}
rgba[0] += int64(coeff) * (int64(row[xi+0])<<8 | int64(row[xi+1]))
rgba[1] += int64(coeff) * (int64(row[xi+2])<<8 | int64(row[xi+3]))
rgba[2] += int64(coeff) * (int64(row[xi+4])<<8 | int64(row[xi+5]))
rgba[3] += int64(coeff) * (int64(row[xi+6])<<8 | int64(row[xi+7]))
sum += int64(coeff)
}
}
xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
value := clampUint16(rgba[0] / sum)
out.Pix[xo+0] = uint8(value >> 8)
out.Pix[xo+1] = uint8(value)
value = clampUint16(rgba[1] / sum)
out.Pix[xo+2] = uint8(value >> 8)
out.Pix[xo+3] = uint8(value)
value = clampUint16(rgba[2] / sum)
out.Pix[xo+4] = uint8(value >> 8)
out.Pix[xo+5] = uint8(value)
value = clampUint16(rgba[3] / sum)
out.Pix[xo+6] = uint8(value >> 8)
out.Pix[xo+7] = uint8(value)
}
}
}
func resizeNRGBA64(in *image.NRGBA64, out *image.RGBA64, scale float64, coeffs []int32, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var rgba [4]int64
var sum int64
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
coeff := coeffs[ci+i]
if coeff != 0 {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 8
case xi >= maxX:
xi = 8 * maxX
default:
xi = 0
}
// Forward alpha-premultiplication
a := int64(uint16(row[xi+6])<<8 | uint16(row[xi+7]))
r := int64(uint16(row[xi+0])<<8|uint16(row[xi+1])) * a
r /= 0xffff
g := int64(uint16(row[xi+2])<<8|uint16(row[xi+3])) * a
g /= 0xffff
b := int64(uint16(row[xi+4])<<8|uint16(row[xi+5])) * a
b /= 0xffff
rgba[0] += int64(coeff) * r
rgba[1] += int64(coeff) * g
rgba[2] += int64(coeff) * b
rgba[3] += int64(coeff) * a
sum += int64(coeff)
}
}
xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
value := clampUint16(rgba[0] / sum)
out.Pix[xo+0] = uint8(value >> 8)
out.Pix[xo+1] = uint8(value)
value = clampUint16(rgba[1] / sum)
out.Pix[xo+2] = uint8(value >> 8)
out.Pix[xo+3] = uint8(value)
value = clampUint16(rgba[2] / sum)
out.Pix[xo+4] = uint8(value >> 8)
out.Pix[xo+5] = uint8(value)
value = clampUint16(rgba[3] / sum)
out.Pix[xo+6] = uint8(value >> 8)
out.Pix[xo+7] = uint8(value)
}
}
}
func resizeGray(in *image.Gray, out *image.Gray, scale float64, coeffs []int16, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[(x-newBounds.Min.X)*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var gray int32
var sum int32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
coeff := coeffs[ci+i]
if coeff != 0 {
xi := start + i
switch {
case xi < 0:
xi = 0
case xi >= maxX:
xi = maxX
}
gray += int32(coeff) * int32(row[xi])
sum += int32(coeff)
}
}
offset := (y-newBounds.Min.Y)*out.Stride + (x - newBounds.Min.X)
out.Pix[offset] = clampUint8(gray / sum)
}
}
}
func resizeGray16(in *image.Gray16, out *image.Gray16, scale float64, coeffs []int32, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var gray int64
var sum int64
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
coeff := coeffs[ci+i]
if coeff != 0 {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 2
case xi >= maxX:
xi = 2 * maxX
default:
xi = 0
}
gray += int64(coeff) * int64(uint16(row[xi+0])<<8|uint16(row[xi+1]))
sum += int64(coeff)
}
}
offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*2
value := clampUint16(gray / sum)
out.Pix[offset+0] = uint8(value >> 8)
out.Pix[offset+1] = uint8(value)
}
}
}
func resizeYCbCr(in *ycc, out *ycc, scale float64, coeffs []int16, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var p [3]int32
var sum int32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
coeff := coeffs[ci+i]
if coeff != 0 {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 3
case xi >= maxX:
xi = 3 * maxX
default:
xi = 0
}
p[0] += int32(coeff) * int32(row[xi+0])
p[1] += int32(coeff) * int32(row[xi+1])
p[2] += int32(coeff) * int32(row[xi+2])
sum += int32(coeff)
}
}
xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*3
out.Pix[xo+0] = clampUint8(p[0] / sum)
out.Pix[xo+1] = clampUint8(p[1] / sum)
out.Pix[xo+2] = clampUint8(p[2] / sum)
}
}
}
func nearestYCbCr(in *ycc, out *ycc, scale float64, coeffs []bool, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var p [3]float32
var sum float32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
if coeffs[ci+i] {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 3
case xi >= maxX:
xi = 3 * maxX
default:
xi = 0
}
p[0] += float32(row[xi+0])
p[1] += float32(row[xi+1])
p[2] += float32(row[xi+2])
sum++
}
}
xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*3
out.Pix[xo+0] = floatToUint8(p[0] / sum)
out.Pix[xo+1] = floatToUint8(p[1] / sum)
out.Pix[xo+2] = floatToUint8(p[2] / sum)
}
}
}

143
vendor/github.com/nfnt/resize/filters.go generated vendored
View File

@ -1,143 +0,0 @@
/*
Copyright (c) 2012, Jan Schlicht <jan.schlicht@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
*/
package resize
import (
"math"
)
func nearest(in float64) float64 {
if in >= -0.5 && in < 0.5 {
return 1
}
return 0
}
func linear(in float64) float64 {
in = math.Abs(in)
if in <= 1 {
return 1 - in
}
return 0
}
func cubic(in float64) float64 {
in = math.Abs(in)
if in <= 1 {
return in*in*(1.5*in-2.5) + 1.0
}
if in <= 2 {
return in*(in*(2.5-0.5*in)-4.0) + 2.0
}
return 0
}
func mitchellnetravali(in float64) float64 {
in = math.Abs(in)
if in <= 1 {
return (7.0*in*in*in - 12.0*in*in + 5.33333333333) * 0.16666666666
}
if in <= 2 {
return (-2.33333333333*in*in*in + 12.0*in*in - 20.0*in + 10.6666666667) * 0.16666666666
}
return 0
}
func sinc(x float64) float64 {
x = math.Abs(x) * math.Pi
if x >= 1.220703e-4 {
return math.Sin(x) / x
}
return 1
}
func lanczos2(in float64) float64 {
if in > -2 && in < 2 {
return sinc(in) * sinc(in*0.5)
}
return 0
}
func lanczos3(in float64) float64 {
if in > -3 && in < 3 {
return sinc(in) * sinc(in*0.3333333333333333)
}
return 0
}
// range [-256,256]
func createWeights8(dy, filterLength int, blur, scale float64, kernel func(float64) float64) ([]int16, []int, int) {
filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1))
filterFactor := math.Min(1./(blur*scale), 1)
coeffs := make([]int16, dy*filterLength)
start := make([]int, dy)
for y := 0; y < dy; y++ {
interpX := scale*(float64(y)+0.5) - 0.5
start[y] = int(interpX) - filterLength/2 + 1
interpX -= float64(start[y])
for i := 0; i < filterLength; i++ {
in := (interpX - float64(i)) * filterFactor
coeffs[y*filterLength+i] = int16(kernel(in) * 256)
}
}
return coeffs, start, filterLength
}
// range [-65536,65536]
func createWeights16(dy, filterLength int, blur, scale float64, kernel func(float64) float64) ([]int32, []int, int) {
filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1))
filterFactor := math.Min(1./(blur*scale), 1)
coeffs := make([]int32, dy*filterLength)
start := make([]int, dy)
for y := 0; y < dy; y++ {
interpX := scale*(float64(y)+0.5) - 0.5
start[y] = int(interpX) - filterLength/2 + 1
interpX -= float64(start[y])
for i := 0; i < filterLength; i++ {
in := (interpX - float64(i)) * filterFactor
coeffs[y*filterLength+i] = int32(kernel(in) * 65536)
}
}
return coeffs, start, filterLength
}
func createWeightsNearest(dy, filterLength int, blur, scale float64) ([]bool, []int, int) {
filterLength = filterLength * int(math.Max(math.Ceil(blur*scale), 1))
filterFactor := math.Min(1./(blur*scale), 1)
coeffs := make([]bool, dy*filterLength)
start := make([]int, dy)
for y := 0; y < dy; y++ {
interpX := scale*(float64(y)+0.5) - 0.5
start[y] = int(interpX) - filterLength/2 + 1
interpX -= float64(start[y])
for i := 0; i < filterLength; i++ {
in := (interpX - float64(i)) * filterFactor
if in >= -0.5 && in < 0.5 {
coeffs[y*filterLength+i] = true
} else {
coeffs[y*filterLength+i] = false
}
}
}
return coeffs, start, filterLength
}

318
vendor/github.com/nfnt/resize/nearest.go generated vendored
View File

@ -1,318 +0,0 @@
/*
Copyright (c) 2014, Charlie Vieth <charlie.vieth@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
*/
package resize
import "image"
func floatToUint8(x float32) uint8 {
// Nearest-neighbor values are always
// positive no need to check lower-bound.
if x > 0xfe {
return 0xff
}
return uint8(x)
}
func floatToUint16(x float32) uint16 {
if x > 0xfffe {
return 0xffff
}
return uint16(x)
}
func nearestGeneric(in image.Image, out *image.RGBA64, scale float64, coeffs []bool, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var rgba [4]float32
var sum float32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
if coeffs[ci+i] {
xi := start + i
switch {
case xi < 0:
xi = 0
case xi >= maxX:
xi = maxX
}
r, g, b, a := in.At(xi+in.Bounds().Min.X, x+in.Bounds().Min.Y).RGBA()
rgba[0] += float32(r)
rgba[1] += float32(g)
rgba[2] += float32(b)
rgba[3] += float32(a)
sum++
}
}
offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
value := floatToUint16(rgba[0] / sum)
out.Pix[offset+0] = uint8(value >> 8)
out.Pix[offset+1] = uint8(value)
value = floatToUint16(rgba[1] / sum)
out.Pix[offset+2] = uint8(value >> 8)
out.Pix[offset+3] = uint8(value)
value = floatToUint16(rgba[2] / sum)
out.Pix[offset+4] = uint8(value >> 8)
out.Pix[offset+5] = uint8(value)
value = floatToUint16(rgba[3] / sum)
out.Pix[offset+6] = uint8(value >> 8)
out.Pix[offset+7] = uint8(value)
}
}
}
func nearestRGBA(in *image.RGBA, out *image.RGBA, scale float64, coeffs []bool, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var rgba [4]float32
var sum float32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
if coeffs[ci+i] {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 4
case xi >= maxX:
xi = 4 * maxX
default:
xi = 0
}
rgba[0] += float32(row[xi+0])
rgba[1] += float32(row[xi+1])
rgba[2] += float32(row[xi+2])
rgba[3] += float32(row[xi+3])
sum++
}
}
xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4
out.Pix[xo+0] = floatToUint8(rgba[0] / sum)
out.Pix[xo+1] = floatToUint8(rgba[1] / sum)
out.Pix[xo+2] = floatToUint8(rgba[2] / sum)
out.Pix[xo+3] = floatToUint8(rgba[3] / sum)
}
}
}
func nearestNRGBA(in *image.NRGBA, out *image.NRGBA, scale float64, coeffs []bool, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var rgba [4]float32
var sum float32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
if coeffs[ci+i] {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 4
case xi >= maxX:
xi = 4 * maxX
default:
xi = 0
}
rgba[0] += float32(row[xi+0])
rgba[1] += float32(row[xi+1])
rgba[2] += float32(row[xi+2])
rgba[3] += float32(row[xi+3])
sum++
}
}
xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*4
out.Pix[xo+0] = floatToUint8(rgba[0] / sum)
out.Pix[xo+1] = floatToUint8(rgba[1] / sum)
out.Pix[xo+2] = floatToUint8(rgba[2] / sum)
out.Pix[xo+3] = floatToUint8(rgba[3] / sum)
}
}
}
func nearestRGBA64(in *image.RGBA64, out *image.RGBA64, scale float64, coeffs []bool, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var rgba [4]float32
var sum float32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
if coeffs[ci+i] {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 8
case xi >= maxX:
xi = 8 * maxX
default:
xi = 0
}
rgba[0] += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1]))
rgba[1] += float32(uint16(row[xi+2])<<8 | uint16(row[xi+3]))
rgba[2] += float32(uint16(row[xi+4])<<8 | uint16(row[xi+5]))
rgba[3] += float32(uint16(row[xi+6])<<8 | uint16(row[xi+7]))
sum++
}
}
xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
value := floatToUint16(rgba[0] / sum)
out.Pix[xo+0] = uint8(value >> 8)
out.Pix[xo+1] = uint8(value)
value = floatToUint16(rgba[1] / sum)
out.Pix[xo+2] = uint8(value >> 8)
out.Pix[xo+3] = uint8(value)
value = floatToUint16(rgba[2] / sum)
out.Pix[xo+4] = uint8(value >> 8)
out.Pix[xo+5] = uint8(value)
value = floatToUint16(rgba[3] / sum)
out.Pix[xo+6] = uint8(value >> 8)
out.Pix[xo+7] = uint8(value)
}
}
}
func nearestNRGBA64(in *image.NRGBA64, out *image.NRGBA64, scale float64, coeffs []bool, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var rgba [4]float32
var sum float32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
if coeffs[ci+i] {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 8
case xi >= maxX:
xi = 8 * maxX
default:
xi = 0
}
rgba[0] += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1]))
rgba[1] += float32(uint16(row[xi+2])<<8 | uint16(row[xi+3]))
rgba[2] += float32(uint16(row[xi+4])<<8 | uint16(row[xi+5]))
rgba[3] += float32(uint16(row[xi+6])<<8 | uint16(row[xi+7]))
sum++
}
}
xo := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*8
value := floatToUint16(rgba[0] / sum)
out.Pix[xo+0] = uint8(value >> 8)
out.Pix[xo+1] = uint8(value)
value = floatToUint16(rgba[1] / sum)
out.Pix[xo+2] = uint8(value >> 8)
out.Pix[xo+3] = uint8(value)
value = floatToUint16(rgba[2] / sum)
out.Pix[xo+4] = uint8(value >> 8)
out.Pix[xo+5] = uint8(value)
value = floatToUint16(rgba[3] / sum)
out.Pix[xo+6] = uint8(value >> 8)
out.Pix[xo+7] = uint8(value)
}
}
}
func nearestGray(in *image.Gray, out *image.Gray, scale float64, coeffs []bool, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var gray float32
var sum float32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
if coeffs[ci+i] {
xi := start + i
switch {
case xi < 0:
xi = 0
case xi >= maxX:
xi = maxX
}
gray += float32(row[xi])
sum++
}
}
offset := (y-newBounds.Min.Y)*out.Stride + (x - newBounds.Min.X)
out.Pix[offset] = floatToUint8(gray / sum)
}
}
}
func nearestGray16(in *image.Gray16, out *image.Gray16, scale float64, coeffs []bool, offset []int, filterLength int) {
newBounds := out.Bounds()
maxX := in.Bounds().Dx() - 1
for x := newBounds.Min.X; x < newBounds.Max.X; x++ {
row := in.Pix[x*in.Stride:]
for y := newBounds.Min.Y; y < newBounds.Max.Y; y++ {
var gray float32
var sum float32
start := offset[y]
ci := y * filterLength
for i := 0; i < filterLength; i++ {
if coeffs[ci+i] {
xi := start + i
switch {
case uint(xi) < uint(maxX):
xi *= 2
case xi >= maxX:
xi = 2 * maxX
default:
xi = 0
}
gray += float32(uint16(row[xi+0])<<8 | uint16(row[xi+1]))
sum++
}
}
offset := (y-newBounds.Min.Y)*out.Stride + (x-newBounds.Min.X)*2
value := floatToUint16(gray / sum)
out.Pix[offset+0] = uint8(value >> 8)
out.Pix[offset+1] = uint8(value)
}
}
}
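
The nearest* filters above all clamp the sample index with the same unsigned-comparison idiom: converting a possibly negative index to uint turns it into a very large value, so a single uint(xi) < uint(maxX) test separates the in-range case from both out-of-range cases. A minimal standalone sketch of that idiom follows (clampIndex is a hypothetical helper, not part of the vendored package, and it omits the per-pixel-size scaling the real filters apply):

package main

import "fmt"

// clampIndex illustrates the unsigned-comparison clamp used by the nearest*
// filters above. A negative int converted to uint becomes a very large value,
// so one comparison covers the "in range" test.
func clampIndex(xi, maxX int) int {
	switch {
	case uint(xi) < uint(maxX):
		// 0 <= xi < maxX: keep as-is (the real filters also scale by pixel size here)
	case xi >= maxX:
		xi = maxX
	default:
		// xi was negative
		xi = 0
	}
	return xi
}

func main() {
	fmt.Println(clampIndex(-3, 10), clampIndex(4, 10), clampIndex(12, 10)) // 0 4 10
}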

View File

@ -1,620 +0,0 @@
/*
Copyright (c) 2012, Jan Schlicht <jan.schlicht@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
*/
// Package resize implements various image resizing methods.
//
// The package works with the Image interface described in the image package.
// Various interpolation methods are provided and multiple processors may be
// utilized in the computations.
//
// Example:
// imgResized := resize.Resize(1000, 0, imgOld, resize.MitchellNetravali)
package resize
import (
"image"
"runtime"
"sync"
)
// An InterpolationFunction provides the parameters that describe an
// interpolation kernel. It returns the number of samples to take
// and the kernel function to use for sampling.
type InterpolationFunction int
// InterpolationFunction constants
const (
// Nearest-neighbor interpolation
NearestNeighbor InterpolationFunction = iota
// Bilinear interpolation
Bilinear
// Bicubic interpolation (with cubic hermite spline)
Bicubic
// Mitchell-Netravali interpolation
MitchellNetravali
// Lanczos interpolation (a=2)
Lanczos2
// Lanczos interpolation (a=3)
Lanczos3
)
// kernel returns an InterpolationFunction's taps and kernel function.
func (i InterpolationFunction) kernel() (int, func(float64) float64) {
switch i {
case Bilinear:
return 2, linear
case Bicubic:
return 4, cubic
case MitchellNetravali:
return 4, mitchellnetravali
case Lanczos2:
return 4, lanczos2
case Lanczos3:
return 6, lanczos3
default:
// Default to NearestNeighbor.
return 2, nearest
}
}
// values <1 will sharpen the image
var blur = 1.0
// Resize scales an image to new width and height using the interpolation function interp.
// A new image with the given dimensions will be returned.
// If one of the parameters width or height is set to 0, its size will be calculated so that
// the aspect ratio is that of the originating image.
// The resizing algorithm runs the filter passes in parallel across goroutines.
// If the input image has width or height of 0, it is returned unchanged.
func Resize(width, height uint, img image.Image, interp InterpolationFunction) image.Image {
scaleX, scaleY := calcFactors(width, height, float64(img.Bounds().Dx()), float64(img.Bounds().Dy()))
if width == 0 {
width = uint(0.7 + float64(img.Bounds().Dx())/scaleX)
}
if height == 0 {
height = uint(0.7 + float64(img.Bounds().Dy())/scaleY)
}
// Trivial case: return input image
if int(width) == img.Bounds().Dx() && int(height) == img.Bounds().Dy() {
return img
}
// Input image has no pixels
if img.Bounds().Dx() <= 0 || img.Bounds().Dy() <= 0 {
return img
}
if interp == NearestNeighbor {
return resizeNearest(width, height, scaleX, scaleY, img, interp)
}
taps, kernel := interp.kernel()
cpus := runtime.GOMAXPROCS(0)
wg := sync.WaitGroup{}
// Generic access to image.Image is slow in tight loops.
// The optimal access has to be determined from the concrete image type.
switch input := img.(type) {
case *image.RGBA:
// 8-bit precision
temp := image.NewRGBA(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewRGBA(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.RGBA)
go func() {
defer wg.Done()
resizeRGBA(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.RGBA)
go func() {
defer wg.Done()
resizeRGBA(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
case *image.NRGBA:
// 8-bit precision
temp := image.NewRGBA(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewRGBA(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.RGBA)
go func() {
defer wg.Done()
resizeNRGBA(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.RGBA)
go func() {
defer wg.Done()
resizeRGBA(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
case *image.YCbCr:
// 8-bit precision
// accessing the YCbCr arrays in a tight loop is slow.
// converting the image to ycc increases performance by 2x.
temp := newYCC(image.Rect(0, 0, input.Bounds().Dy(), int(width)), input.SubsampleRatio)
result := newYCC(image.Rect(0, 0, int(width), int(height)), image.YCbCrSubsampleRatio444)
coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel)
in := imageYCbCrToYCC(input)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*ycc)
go func() {
defer wg.Done()
resizeYCbCr(in, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*ycc)
go func() {
defer wg.Done()
resizeYCbCr(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result.YCbCr()
case *image.RGBA64:
// 16-bit precision
temp := image.NewRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewRGBA64(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.RGBA64)
go func() {
defer wg.Done()
resizeRGBA64(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.RGBA64)
go func() {
defer wg.Done()
resizeRGBA64(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
case *image.NRGBA64:
// 16-bit precision
temp := image.NewRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewRGBA64(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.RGBA64)
go func() {
defer wg.Done()
resizeNRGBA64(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.RGBA64)
go func() {
defer wg.Done()
resizeRGBA64(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
case *image.Gray:
// 8-bit precision
temp := image.NewGray(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewGray(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeights8(temp.Bounds().Dy(), taps, blur, scaleX, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.Gray)
go func() {
defer wg.Done()
resizeGray(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeights8(result.Bounds().Dy(), taps, blur, scaleY, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.Gray)
go func() {
defer wg.Done()
resizeGray(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
case *image.Gray16:
// 16-bit precision
temp := image.NewGray16(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewGray16(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.Gray16)
go func() {
defer wg.Done()
resizeGray16(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.Gray16)
go func() {
defer wg.Done()
resizeGray16(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
default:
// 16-bit precision
temp := image.NewRGBA64(image.Rect(0, 0, img.Bounds().Dy(), int(width)))
result := image.NewRGBA64(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeights16(temp.Bounds().Dy(), taps, blur, scaleX, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.RGBA64)
go func() {
defer wg.Done()
resizeGeneric(img, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeights16(result.Bounds().Dy(), taps, blur, scaleY, kernel)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.RGBA64)
go func() {
defer wg.Done()
resizeRGBA64(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
}
}
func resizeNearest(width, height uint, scaleX, scaleY float64, img image.Image, interp InterpolationFunction) image.Image {
taps, _ := interp.kernel()
cpus := runtime.GOMAXPROCS(0)
wg := sync.WaitGroup{}
switch input := img.(type) {
case *image.RGBA:
// 8-bit precision
temp := image.NewRGBA(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewRGBA(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.RGBA)
go func() {
defer wg.Done()
nearestRGBA(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.RGBA)
go func() {
defer wg.Done()
nearestRGBA(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
case *image.NRGBA:
// 8-bit precision
temp := image.NewNRGBA(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewNRGBA(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.NRGBA)
go func() {
defer wg.Done()
nearestNRGBA(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.NRGBA)
go func() {
defer wg.Done()
nearestNRGBA(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
case *image.YCbCr:
// 8-bit precision
// accessing the YCbCr arrays in a tight loop is slow.
// converting the image to ycc increases performance by 2x.
temp := newYCC(image.Rect(0, 0, input.Bounds().Dy(), int(width)), input.SubsampleRatio)
result := newYCC(image.Rect(0, 0, int(width), int(height)), image.YCbCrSubsampleRatio444)
coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX)
in := imageYCbCrToYCC(input)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*ycc)
go func() {
defer wg.Done()
nearestYCbCr(in, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*ycc)
go func() {
defer wg.Done()
nearestYCbCr(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result.YCbCr()
case *image.RGBA64:
// 16-bit precision
temp := image.NewRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewRGBA64(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.RGBA64)
go func() {
defer wg.Done()
nearestRGBA64(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.RGBA64)
go func() {
defer wg.Done()
nearestRGBA64(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
case *image.NRGBA64:
// 16-bit precision
temp := image.NewNRGBA64(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewNRGBA64(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.NRGBA64)
go func() {
defer wg.Done()
nearestNRGBA64(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.NRGBA64)
go func() {
defer wg.Done()
nearestNRGBA64(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
case *image.Gray:
// 8-bit precision
temp := image.NewGray(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewGray(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.Gray)
go func() {
defer wg.Done()
nearestGray(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.Gray)
go func() {
defer wg.Done()
nearestGray(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
case *image.Gray16:
// 16-bit precision
temp := image.NewGray16(image.Rect(0, 0, input.Bounds().Dy(), int(width)))
result := image.NewGray16(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.Gray16)
go func() {
defer wg.Done()
nearestGray16(input, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.Gray16)
go func() {
defer wg.Done()
nearestGray16(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
default:
// 16-bit precision
temp := image.NewRGBA64(image.Rect(0, 0, img.Bounds().Dy(), int(width)))
result := image.NewRGBA64(image.Rect(0, 0, int(width), int(height)))
// horizontal filter, results in transposed temporary image
coeffs, offset, filterLength := createWeightsNearest(temp.Bounds().Dy(), taps, blur, scaleX)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(temp, i, cpus).(*image.RGBA64)
go func() {
defer wg.Done()
nearestGeneric(img, slice, scaleX, coeffs, offset, filterLength)
}()
}
wg.Wait()
// horizontal filter on transposed image, result is not transposed
coeffs, offset, filterLength = createWeightsNearest(result.Bounds().Dy(), taps, blur, scaleY)
wg.Add(cpus)
for i := 0; i < cpus; i++ {
slice := makeSlice(result, i, cpus).(*image.RGBA64)
go func() {
defer wg.Done()
nearestRGBA64(temp, slice, scaleY, coeffs, offset, filterLength)
}()
}
wg.Wait()
return result
}
}
// Calculates scaling factors using old and new image dimensions.
func calcFactors(width, height uint, oldWidth, oldHeight float64) (scaleX, scaleY float64) {
if width == 0 {
if height == 0 {
scaleX = 1.0
scaleY = 1.0
} else {
scaleY = oldHeight / float64(height)
scaleX = scaleY
}
} else {
scaleX = oldWidth / float64(width)
if height == 0 {
scaleY = scaleX
} else {
scaleY = oldHeight / float64(height)
}
}
return
}
type imageWithSubImage interface {
image.Image
SubImage(image.Rectangle) image.Image
}
func makeSlice(img imageWithSubImage, i, n int) image.Image {
return img.SubImage(image.Rect(img.Bounds().Min.X, img.Bounds().Min.Y+i*img.Bounds().Dy()/n, img.Bounds().Max.X, img.Bounds().Min.Y+(i+1)*img.Bounds().Dy()/n))
}
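
To make the width/height-of-0 handling in Resize above concrete: for a 4000x3000 source and Resize(1000, 0, ...), calcFactors yields scaleX = scaleY = 4 and the missing height becomes uint(0.7 + 3000/4) = 750. The sketch below replays that arithmetic; calcAspectFactors simply duplicates the unexported calcFactors logic for illustration and is not part of the vendored package:

package main

import "fmt"

// calcAspectFactors duplicates the calcFactors logic above, purely to replay
// the width=1000, height=0 example.
func calcAspectFactors(width, height uint, oldWidth, oldHeight float64) (scaleX, scaleY float64) {
	if width == 0 {
		if height == 0 {
			return 1.0, 1.0
		}
		scaleY = oldHeight / float64(height)
		return scaleY, scaleY
	}
	scaleX = oldWidth / float64(width)
	if height == 0 {
		return scaleX, scaleX
	}
	return scaleX, oldHeight / float64(height)
}

func main() {
	width, height := uint(1000), uint(0)
	oldW, oldH := 4000.0, 3000.0
	scaleX, scaleY := calcAspectFactors(width, height, oldW, oldH)
	if height == 0 {
		height = uint(0.7 + oldH/scaleY) // same rounding as Resize above
	}
	fmt.Println(scaleX, scaleY, width, height) // 4 4 1000 750
}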

View File

@ -1,55 +0,0 @@
/*
Copyright (c) 2012, Jan Schlicht <jan.schlicht@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
*/
package resize
import (
"image"
)
// Thumbnail will downscale the provided image to the given max width and height,
// preserving the original aspect ratio and using the interpolation function interp.
// It returns the original image, without processing it, if its size is already
// within the provided constraints.
func Thumbnail(maxWidth, maxHeight uint, img image.Image, interp InterpolationFunction) image.Image {
origBounds := img.Bounds()
origWidth := uint(origBounds.Dx())
origHeight := uint(origBounds.Dy())
newWidth, newHeight := origWidth, origHeight
// Return the original image if it is already the same size as or smaller than the constraints
if maxWidth >= origWidth && maxHeight >= origHeight {
return img
}
// Preserve aspect ratio
if origWidth > maxWidth {
newHeight = uint(origHeight * maxWidth / origWidth)
if newHeight < 1 {
newHeight = 1
}
newWidth = maxWidth
}
if newHeight > maxHeight {
newWidth = uint(newWidth * maxHeight / newHeight)
if newWidth < 1 {
newWidth = 1
}
newHeight = maxHeight
}
return Resize(newWidth, newHeight, img, interp)
}
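
A worked example of the aspect-ratio arithmetic in Thumbnail above: fitting a 4000x3000 image into a 512x512 box scales by width first (newHeight = 3000*512/4000 = 384, newWidth = 512), and the height branch is then a no-op because 384 <= 512. The sketch below replays just that clamping; fitWithin is an illustrative duplicate and not part of the vendored package:

package main

import "fmt"

// fitWithin duplicates Thumbnail's clamping arithmetic above for one concrete case.
func fitWithin(maxW, maxH, w, h uint) (uint, uint) {
	if maxW >= w && maxH >= h {
		return w, h // already within the constraints
	}
	if w > maxW {
		h = h * maxW / w
		if h < 1 {
			h = 1
		}
		w = maxW
	}
	if h > maxH {
		w = w * maxH / h
		if w < 1 {
			w = 1
		}
		h = maxH
	}
	return w, h
}

func main() {
	w, h := fitWithin(512, 512, 4000, 3000)
	fmt.Println(w, h) // 512 384
}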

387
vendor/github.com/nfnt/resize/ycc.go generated vendored
View File

@ -1,387 +0,0 @@
/*
Copyright (c) 2014, Charlie Vieth <charlie.vieth@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
*/
package resize
import (
"image"
"image/color"
)
// ycc is an in memory YCbCr image. The Y, Cb and Cr samples are held in a
// single slice to increase resizing performance.
type ycc struct {
// Pix holds the image's pixels, in Y, Cb, Cr order. The pixel at
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*3].
Pix []uint8
// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
Stride int
// Rect is the image's bounds.
Rect image.Rectangle
// SubsampleRatio is the subsample ratio of the original YCbCr image.
SubsampleRatio image.YCbCrSubsampleRatio
}
// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (p *ycc) PixOffset(x, y int) int {
return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3
}
func (p *ycc) Bounds() image.Rectangle {
return p.Rect
}
func (p *ycc) ColorModel() color.Model {
return color.YCbCrModel
}
func (p *ycc) At(x, y int) color.Color {
if !(image.Point{x, y}.In(p.Rect)) {
return color.YCbCr{}
}
i := p.PixOffset(x, y)
return color.YCbCr{
p.Pix[i+0],
p.Pix[i+1],
p.Pix[i+2],
}
}
func (p *ycc) Opaque() bool {
return true
}
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *ycc) SubImage(r image.Rectangle) image.Image {
r = r.Intersect(p.Rect)
if r.Empty() {
return &ycc{SubsampleRatio: p.SubsampleRatio}
}
i := p.PixOffset(r.Min.X, r.Min.Y)
return &ycc{
Pix: p.Pix[i:],
Stride: p.Stride,
Rect: r,
SubsampleRatio: p.SubsampleRatio,
}
}
// newYCC returns a new ycc with the given bounds and subsample ratio.
func newYCC(r image.Rectangle, s image.YCbCrSubsampleRatio) *ycc {
w, h := r.Dx(), r.Dy()
buf := make([]uint8, 3*w*h)
return &ycc{Pix: buf, Stride: 3 * w, Rect: r, SubsampleRatio: s}
}
// Copy of image.YCbCrSubsampleRatio constants - this allows us to support
// older versions of Go where these constants are not defined (i.e. Go 1.4)
const (
ycbcrSubsampleRatio444 image.YCbCrSubsampleRatio = iota
ycbcrSubsampleRatio422
ycbcrSubsampleRatio420
ycbcrSubsampleRatio440
ycbcrSubsampleRatio411
ycbcrSubsampleRatio410
)
// YCbCr converts ycc to a YCbCr image with the same subsample ratio
// as the YCbCr image that ycc was generated from.
func (p *ycc) YCbCr() *image.YCbCr {
ycbcr := image.NewYCbCr(p.Rect, p.SubsampleRatio)
switch ycbcr.SubsampleRatio {
case ycbcrSubsampleRatio422:
return p.ycbcr422(ycbcr)
case ycbcrSubsampleRatio420:
return p.ycbcr420(ycbcr)
case ycbcrSubsampleRatio440:
return p.ycbcr440(ycbcr)
case ycbcrSubsampleRatio444:
return p.ycbcr444(ycbcr)
case ycbcrSubsampleRatio411:
return p.ycbcr411(ycbcr)
case ycbcrSubsampleRatio410:
return p.ycbcr410(ycbcr)
}
return ycbcr
}
// imageYCbCrToYCC converts a YCbCr image to a ycc image for resizing.
func imageYCbCrToYCC(in *image.YCbCr) *ycc {
w, h := in.Rect.Dx(), in.Rect.Dy()
p := ycc{
Pix: make([]uint8, 3*w*h),
Stride: 3 * w,
Rect: image.Rect(0, 0, w, h),
SubsampleRatio: in.SubsampleRatio,
}
switch in.SubsampleRatio {
case ycbcrSubsampleRatio422:
return convertToYCC422(in, &p)
case ycbcrSubsampleRatio420:
return convertToYCC420(in, &p)
case ycbcrSubsampleRatio440:
return convertToYCC440(in, &p)
case ycbcrSubsampleRatio444:
return convertToYCC444(in, &p)
case ycbcrSubsampleRatio411:
return convertToYCC411(in, &p)
case ycbcrSubsampleRatio410:
return convertToYCC410(in, &p)
}
return &p
}
func (p *ycc) ycbcr422(ycbcr *image.YCbCr) *image.YCbCr {
var off int
Pix := p.Pix
Y := ycbcr.Y
Cb := ycbcr.Cb
Cr := ycbcr.Cr
for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
yy := y * ycbcr.YStride
cy := y * ycbcr.CStride
for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
ci := cy + x/2
Y[yy+x] = Pix[off+0]
Cb[ci] = Pix[off+1]
Cr[ci] = Pix[off+2]
off += 3
}
}
return ycbcr
}
func (p *ycc) ycbcr420(ycbcr *image.YCbCr) *image.YCbCr {
var off int
Pix := p.Pix
Y := ycbcr.Y
Cb := ycbcr.Cb
Cr := ycbcr.Cr
for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
yy := y * ycbcr.YStride
cy := (y / 2) * ycbcr.CStride
for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
ci := cy + x/2
Y[yy+x] = Pix[off+0]
Cb[ci] = Pix[off+1]
Cr[ci] = Pix[off+2]
off += 3
}
}
return ycbcr
}
func (p *ycc) ycbcr440(ycbcr *image.YCbCr) *image.YCbCr {
var off int
Pix := p.Pix
Y := ycbcr.Y
Cb := ycbcr.Cb
Cr := ycbcr.Cr
for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
yy := y * ycbcr.YStride
cy := (y / 2) * ycbcr.CStride
for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
ci := cy + x
Y[yy+x] = Pix[off+0]
Cb[ci] = Pix[off+1]
Cr[ci] = Pix[off+2]
off += 3
}
}
return ycbcr
}
func (p *ycc) ycbcr444(ycbcr *image.YCbCr) *image.YCbCr {
var off int
Pix := p.Pix
Y := ycbcr.Y
Cb := ycbcr.Cb
Cr := ycbcr.Cr
for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
yy := y * ycbcr.YStride
cy := y * ycbcr.CStride
for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
ci := cy + x
Y[yy+x] = Pix[off+0]
Cb[ci] = Pix[off+1]
Cr[ci] = Pix[off+2]
off += 3
}
}
return ycbcr
}
func (p *ycc) ycbcr411(ycbcr *image.YCbCr) *image.YCbCr {
var off int
Pix := p.Pix
Y := ycbcr.Y
Cb := ycbcr.Cb
Cr := ycbcr.Cr
for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
yy := y * ycbcr.YStride
cy := y * ycbcr.CStride
for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
ci := cy + x/4
Y[yy+x] = Pix[off+0]
Cb[ci] = Pix[off+1]
Cr[ci] = Pix[off+2]
off += 3
}
}
return ycbcr
}
func (p *ycc) ycbcr410(ycbcr *image.YCbCr) *image.YCbCr {
var off int
Pix := p.Pix
Y := ycbcr.Y
Cb := ycbcr.Cb
Cr := ycbcr.Cr
for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
yy := y * ycbcr.YStride
cy := (y / 2) * ycbcr.CStride
for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
ci := cy + x/4
Y[yy+x] = Pix[off+0]
Cb[ci] = Pix[off+1]
Cr[ci] = Pix[off+2]
off += 3
}
}
return ycbcr
}
func convertToYCC422(in *image.YCbCr, p *ycc) *ycc {
var off int
Pix := p.Pix
Y := in.Y
Cb := in.Cb
Cr := in.Cr
for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
yy := y * in.YStride
cy := y * in.CStride
for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
ci := cy + x/2
Pix[off+0] = Y[yy+x]
Pix[off+1] = Cb[ci]
Pix[off+2] = Cr[ci]
off += 3
}
}
return p
}
func convertToYCC420(in *image.YCbCr, p *ycc) *ycc {
var off int
Pix := p.Pix
Y := in.Y
Cb := in.Cb
Cr := in.Cr
for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
yy := y * in.YStride
cy := (y / 2) * in.CStride
for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
ci := cy + x/2
Pix[off+0] = Y[yy+x]
Pix[off+1] = Cb[ci]
Pix[off+2] = Cr[ci]
off += 3
}
}
return p
}
func convertToYCC440(in *image.YCbCr, p *ycc) *ycc {
var off int
Pix := p.Pix
Y := in.Y
Cb := in.Cb
Cr := in.Cr
for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
yy := y * in.YStride
cy := (y / 2) * in.CStride
for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
ci := cy + x
Pix[off+0] = Y[yy+x]
Pix[off+1] = Cb[ci]
Pix[off+2] = Cr[ci]
off += 3
}
}
return p
}
func convertToYCC444(in *image.YCbCr, p *ycc) *ycc {
var off int
Pix := p.Pix
Y := in.Y
Cb := in.Cb
Cr := in.Cr
for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
yy := y * in.YStride
cy := y * in.CStride
for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
ci := cy + x
Pix[off+0] = Y[yy+x]
Pix[off+1] = Cb[ci]
Pix[off+2] = Cr[ci]
off += 3
}
}
return p
}
func convertToYCC411(in *image.YCbCr, p *ycc) *ycc {
var off int
Pix := p.Pix
Y := in.Y
Cb := in.Cb
Cr := in.Cr
for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
yy := y * in.YStride
cy := y * in.CStride
for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
ci := cy + x/4
Pix[off+0] = Y[yy+x]
Pix[off+1] = Cb[ci]
Pix[off+2] = Cr[ci]
off += 3
}
}
return p
}
func convertToYCC410(in *image.YCbCr, p *ycc) *ycc {
var off int
Pix := p.Pix
Y := in.Y
Cb := in.Cb
Cr := in.Cr
for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
yy := y * in.YStride
cy := (y / 2) * in.CStride
for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
ci := cy + x/4
Pix[off+0] = Y[yy+x]
Pix[off+1] = Cb[ci]
Pix[off+2] = Cr[ci]
off += 3
}
}
return p
}
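
The ycc type above stores Y, Cb and Cr interleaved in a single Pix slice at 3 bytes per pixel, which is where the PixOffset formula (y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*3 comes from. A tiny sketch of the offsets that formula produces for a 4x2 image with zero-origin bounds (illustration only, not part of the vendored package):

package main

import "fmt"

func main() {
	// 4x2 ycc-style image with bounds starting at (0,0): 3 bytes per pixel
	// (Y, Cb, Cr), so Stride = 3*width and the offset of (x, y) is
	// y*Stride + x*3, exactly as in PixOffset above.
	const width, height = 4, 2
	stride := 3 * width

	pixOffset := func(x, y int) int {
		return y*stride + x*3
	}

	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			fmt.Printf("(%d,%d) -> %d\n", x, y, pixOffset(x, y))
		}
	}
	// prints offsets 0, 3, 6, 9 for row 0 and 12, 15, 18, 21 for row 1
}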

3
vendor/golang.org/x/image/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/image/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/image/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/image/PATENTS generated vendored Normal file
View File

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

213
vendor/golang.org/x/image/bmp/reader.go generated vendored Normal file
View File

@ -0,0 +1,213 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bmp implements a BMP image decoder and encoder.
//
// The BMP specification is at http://www.digicamsoft.com/bmp/bmp.html.
package bmp // import "golang.org/x/image/bmp"
import (
"errors"
"image"
"image/color"
"io"
)
// ErrUnsupported means that the input BMP image uses a valid but unsupported
// feature.
var ErrUnsupported = errors.New("bmp: unsupported BMP image")
func readUint16(b []byte) uint16 {
return uint16(b[0]) | uint16(b[1])<<8
}
func readUint32(b []byte) uint32 {
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
// decodePaletted reads an 8 bit-per-pixel BMP image from r.
// If topDown is false, the image rows will be read bottom-up.
func decodePaletted(r io.Reader, c image.Config, topDown bool) (image.Image, error) {
paletted := image.NewPaletted(image.Rect(0, 0, c.Width, c.Height), c.ColorModel.(color.Palette))
if c.Width == 0 || c.Height == 0 {
return paletted, nil
}
var tmp [4]byte
y0, y1, yDelta := c.Height-1, -1, -1
if topDown {
y0, y1, yDelta = 0, c.Height, +1
}
for y := y0; y != y1; y += yDelta {
p := paletted.Pix[y*paletted.Stride : y*paletted.Stride+c.Width]
if _, err := io.ReadFull(r, p); err != nil {
return nil, err
}
// Each row is 4-byte aligned.
if c.Width%4 != 0 {
_, err := io.ReadFull(r, tmp[:4-c.Width%4])
if err != nil {
return nil, err
}
}
}
return paletted, nil
}
// decodeRGB reads a 24 bit-per-pixel BMP image from r.
// If topDown is false, the image rows will be read bottom-up.
func decodeRGB(r io.Reader, c image.Config, topDown bool) (image.Image, error) {
rgba := image.NewRGBA(image.Rect(0, 0, c.Width, c.Height))
if c.Width == 0 || c.Height == 0 {
return rgba, nil
}
// There are 3 bytes per pixel, and each row is 4-byte aligned.
b := make([]byte, (3*c.Width+3)&^3)
y0, y1, yDelta := c.Height-1, -1, -1
if topDown {
y0, y1, yDelta = 0, c.Height, +1
}
for y := y0; y != y1; y += yDelta {
if _, err := io.ReadFull(r, b); err != nil {
return nil, err
}
p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width*4]
for i, j := 0, 0; i < len(p); i, j = i+4, j+3 {
// BMP images are stored in BGR order rather than RGB order.
p[i+0] = b[j+2]
p[i+1] = b[j+1]
p[i+2] = b[j+0]
p[i+3] = 0xFF
}
}
return rgba, nil
}
// decodeNRGBA reads a 32 bit-per-pixel BMP image from r.
// If topDown is false, the image rows will be read bottom-up.
func decodeNRGBA(r io.Reader, c image.Config, topDown bool) (image.Image, error) {
rgba := image.NewNRGBA(image.Rect(0, 0, c.Width, c.Height))
if c.Width == 0 || c.Height == 0 {
return rgba, nil
}
y0, y1, yDelta := c.Height-1, -1, -1
if topDown {
y0, y1, yDelta = 0, c.Height, +1
}
for y := y0; y != y1; y += yDelta {
p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width*4]
if _, err := io.ReadFull(r, p); err != nil {
return nil, err
}
for i := 0; i < len(p); i += 4 {
// BMP images are stored in BGRA order rather than RGBA order.
p[i+0], p[i+2] = p[i+2], p[i+0]
}
}
return rgba, nil
}
// Decode reads a BMP image from r and returns it as an image.Image.
// Limitation: The file must be 8, 24 or 32 bits per pixel.
func Decode(r io.Reader) (image.Image, error) {
c, bpp, topDown, err := decodeConfig(r)
if err != nil {
return nil, err
}
switch bpp {
case 8:
return decodePaletted(r, c, topDown)
case 24:
return decodeRGB(r, c, topDown)
case 32:
return decodeNRGBA(r, c, topDown)
}
panic("unreachable")
}
// DecodeConfig returns the color model and dimensions of a BMP image without
// decoding the entire image.
// Limitation: The file must be 8, 24 or 32 bits per pixel.
func DecodeConfig(r io.Reader) (image.Config, error) {
config, _, _, err := decodeConfig(r)
return config, err
}
func decodeConfig(r io.Reader) (config image.Config, bitsPerPixel int, topDown bool, err error) {
// We only support those BMP images that are a BITMAPFILEHEADER
// immediately followed by a BITMAPINFOHEADER.
const (
fileHeaderLen = 14
infoHeaderLen = 40
v4InfoHeaderLen = 108
v5InfoHeaderLen = 124
)
var b [1024]byte
if _, err := io.ReadFull(r, b[:fileHeaderLen+4]); err != nil {
return image.Config{}, 0, false, err
}
if string(b[:2]) != "BM" {
return image.Config{}, 0, false, errors.New("bmp: invalid format")
}
offset := readUint32(b[10:14])
infoLen := readUint32(b[14:18])
if infoLen != infoHeaderLen && infoLen != v4InfoHeaderLen && infoLen != v5InfoHeaderLen {
return image.Config{}, 0, false, ErrUnsupported
}
if _, err := io.ReadFull(r, b[fileHeaderLen+4:fileHeaderLen+infoLen]); err != nil {
return image.Config{}, 0, false, err
}
width := int(int32(readUint32(b[18:22])))
height := int(int32(readUint32(b[22:26])))
if height < 0 {
height, topDown = -height, true
}
if width < 0 || height < 0 {
return image.Config{}, 0, false, ErrUnsupported
}
// We only support 1 plane and 8, 24 or 32 bits per pixel and no
// compression.
planes, bpp, compression := readUint16(b[26:28]), readUint16(b[28:30]), readUint32(b[30:34])
// if compression is set to BITFIELDS, but the bitmask is set to the default bitmask
// that would be used if compression was set to 0, we can continue as if compression was 0
if compression == 3 && infoLen > infoHeaderLen &&
readUint32(b[54:58]) == 0xff0000 && readUint32(b[58:62]) == 0xff00 &&
readUint32(b[62:66]) == 0xff && readUint32(b[66:70]) == 0xff000000 {
compression = 0
}
if planes != 1 || compression != 0 {
return image.Config{}, 0, false, ErrUnsupported
}
switch bpp {
case 8:
if offset != fileHeaderLen+infoLen+256*4 {
return image.Config{}, 0, false, ErrUnsupported
}
_, err = io.ReadFull(r, b[:256*4])
if err != nil {
return image.Config{}, 0, false, err
}
pcm := make(color.Palette, 256)
for i := range pcm {
// BMP images are stored in BGR order rather than RGB order.
// Every 4th byte is padding.
pcm[i] = color.RGBA{b[4*i+2], b[4*i+1], b[4*i+0], 0xFF}
}
return image.Config{ColorModel: pcm, Width: width, Height: height}, 8, topDown, nil
case 24:
if offset != fileHeaderLen+infoLen {
return image.Config{}, 0, false, ErrUnsupported
}
return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 24, topDown, nil
case 32:
if offset != fileHeaderLen+infoLen {
return image.Config{}, 0, false, ErrUnsupported
}
return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 32, topDown, nil
}
return image.Config{}, 0, false, ErrUnsupported
}
func init() {
image.RegisterFormat("bmp", "BM????\x00\x00\x00\x00", Decode, DecodeConfig)
}
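
A short usage sketch of the decoder above; the file path is a placeholder and error handling is kept minimal:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/image/bmp"
)

func main() {
	f, err := os.Open("example.bmp") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	img, err := bmp.Decode(f)
	if err != nil {
		log.Fatal(err)
	}
	b := img.Bounds()
	fmt.Printf("decoded %dx%d BMP\n", b.Dx(), b.Dy())
}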

262
vendor/golang.org/x/image/bmp/writer.go generated vendored Normal file
View File

@ -0,0 +1,262 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bmp
import (
"encoding/binary"
"errors"
"image"
"io"
)
type header struct {
sigBM [2]byte
fileSize uint32
resverved [2]uint16
pixOffset uint32
dibHeaderSize uint32
width uint32
height uint32
colorPlane uint16
bpp uint16
compression uint32
imageSize uint32
xPixelsPerMeter uint32
yPixelsPerMeter uint32
colorUse uint32
colorImportant uint32
}
func encodePaletted(w io.Writer, pix []uint8, dx, dy, stride, step int) error {
var padding []byte
if dx < step {
padding = make([]byte, step-dx)
}
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx
if _, err := w.Write(pix[min:max]); err != nil {
return err
}
if padding != nil {
if _, err := w.Write(padding); err != nil {
return err
}
}
}
return nil
}
func encodeRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int, opaque bool) error {
buf := make([]byte, step)
if opaque {
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx*4
off := 0
for i := min; i < max; i += 4 {
buf[off+2] = pix[i+0]
buf[off+1] = pix[i+1]
buf[off+0] = pix[i+2]
off += 3
}
if _, err := w.Write(buf); err != nil {
return err
}
}
} else {
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx*4
off := 0
for i := min; i < max; i += 4 {
a := uint32(pix[i+3])
if a == 0 {
buf[off+2] = 0
buf[off+1] = 0
buf[off+0] = 0
buf[off+3] = 0
off += 4
continue
} else if a == 0xff {
buf[off+2] = pix[i+0]
buf[off+1] = pix[i+1]
buf[off+0] = pix[i+2]
buf[off+3] = 0xff
off += 4
continue
}
buf[off+2] = uint8(((uint32(pix[i+0]) * 0xffff) / a) >> 8)
buf[off+1] = uint8(((uint32(pix[i+1]) * 0xffff) / a) >> 8)
buf[off+0] = uint8(((uint32(pix[i+2]) * 0xffff) / a) >> 8)
buf[off+3] = uint8(a)
off += 4
}
if _, err := w.Write(buf); err != nil {
return err
}
}
}
return nil
}
func encodeNRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int, opaque bool) error {
buf := make([]byte, step)
if opaque {
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx*4
off := 0
for i := min; i < max; i += 4 {
buf[off+2] = pix[i+0]
buf[off+1] = pix[i+1]
buf[off+0] = pix[i+2]
off += 3
}
if _, err := w.Write(buf); err != nil {
return err
}
}
} else {
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx*4
off := 0
for i := min; i < max; i += 4 {
buf[off+2] = pix[i+0]
buf[off+1] = pix[i+1]
buf[off+0] = pix[i+2]
buf[off+3] = pix[i+3]
off += 4
}
if _, err := w.Write(buf); err != nil {
return err
}
}
}
return nil
}
func encode(w io.Writer, m image.Image, step int) error {
b := m.Bounds()
buf := make([]byte, step)
for y := b.Max.Y - 1; y >= b.Min.Y; y-- {
off := 0
for x := b.Min.X; x < b.Max.X; x++ {
r, g, b, _ := m.At(x, y).RGBA()
buf[off+2] = byte(r >> 8)
buf[off+1] = byte(g >> 8)
buf[off+0] = byte(b >> 8)
off += 3
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
// Encode writes the image m to w in BMP format.
func Encode(w io.Writer, m image.Image) error {
d := m.Bounds().Size()
if d.X < 0 || d.Y < 0 {
return errors.New("bmp: negative bounds")
}
h := &header{
sigBM: [2]byte{'B', 'M'},
fileSize: 14 + 40,
pixOffset: 14 + 40,
dibHeaderSize: 40,
width: uint32(d.X),
height: uint32(d.Y),
colorPlane: 1,
}
var step int
var palette []byte
var opaque bool
switch m := m.(type) {
case *image.Gray:
step = (d.X + 3) &^ 3
palette = make([]byte, 1024)
for i := 0; i < 256; i++ {
palette[i*4+0] = uint8(i)
palette[i*4+1] = uint8(i)
palette[i*4+2] = uint8(i)
palette[i*4+3] = 0xFF
}
h.imageSize = uint32(d.Y * step)
h.fileSize += uint32(len(palette)) + h.imageSize
h.pixOffset += uint32(len(palette))
h.bpp = 8
case *image.Paletted:
step = (d.X + 3) &^ 3
palette = make([]byte, 1024)
for i := 0; i < len(m.Palette) && i < 256; i++ {
r, g, b, _ := m.Palette[i].RGBA()
palette[i*4+0] = uint8(b >> 8)
palette[i*4+1] = uint8(g >> 8)
palette[i*4+2] = uint8(r >> 8)
palette[i*4+3] = 0xFF
}
h.imageSize = uint32(d.Y * step)
h.fileSize += uint32(len(palette)) + h.imageSize
h.pixOffset += uint32(len(palette))
h.bpp = 8
case *image.RGBA:
opaque = m.Opaque()
if opaque {
step = (3*d.X + 3) &^ 3
h.bpp = 24
} else {
step = 4 * d.X
h.bpp = 32
}
h.imageSize = uint32(d.Y * step)
h.fileSize += h.imageSize
case *image.NRGBA:
opaque = m.Opaque()
if opaque {
step = (3*d.X + 3) &^ 3
h.bpp = 24
} else {
step = 4 * d.X
h.bpp = 32
}
h.imageSize = uint32(d.Y * step)
h.fileSize += h.imageSize
default:
step = (3*d.X + 3) &^ 3
h.imageSize = uint32(d.Y * step)
h.fileSize += h.imageSize
h.bpp = 24
}
if err := binary.Write(w, binary.LittleEndian, h); err != nil {
return err
}
if palette != nil {
if err := binary.Write(w, binary.LittleEndian, palette); err != nil {
return err
}
}
if d.X == 0 || d.Y == 0 {
return nil
}
switch m := m.(type) {
case *image.Gray:
return encodePaletted(w, m.Pix, d.X, d.Y, m.Stride, step)
case *image.Paletted:
return encodePaletted(w, m.Pix, d.X, d.Y, m.Stride, step)
case *image.RGBA:
return encodeRGBA(w, m.Pix, d.X, d.Y, m.Stride, step, opaque)
case *image.NRGBA:
return encodeNRGBA(w, m.Pix, d.X, d.Y, m.Stride, step, opaque)
}
return encode(w, m, step)
}
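
And a matching sketch for the encoder above: Encode picks 8, 24 or 32 bits per pixel from the concrete image type, so an *image.NRGBA with non-opaque pixels is written as a 32-bpp file. The output path is a placeholder:

package main

import (
	"image"
	"image/color"
	"log"
	"os"

	"golang.org/x/image/bmp"
)

func main() {
	// Generate a small gradient with varying alpha; because the image is not
	// opaque, Encode chooses the 32-bpp path above.
	img := image.NewNRGBA(image.Rect(0, 0, 64, 64))
	for y := 0; y < 64; y++ {
		for x := 0; x < 64; x++ {
			img.Set(x, y, color.NRGBA{R: uint8(4 * x), G: uint8(4 * y), B: 128, A: uint8(2 * (x + y))})
		}
	}

	f, err := os.Create("out.bmp") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := bmp.Encode(f, img); err != nil {
		log.Fatal(err)
	}
}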

697
vendor/golang.org/x/image/ccitt/reader.go generated vendored Normal file
View File

@ -0,0 +1,697 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// Package ccitt implements a CCITT (fax) image decoder.
package ccitt
import (
"encoding/binary"
"errors"
"image"
"io"
"math/bits"
)
var (
errInvalidBounds = errors.New("ccitt: invalid bounds")
errInvalidCode = errors.New("ccitt: invalid code")
errInvalidMode = errors.New("ccitt: invalid mode")
errInvalidOffset = errors.New("ccitt: invalid offset")
errMissingEOL = errors.New("ccitt: missing End-of-Line")
errRunLengthOverflowsWidth = errors.New("ccitt: run length overflows width")
errRunLengthTooLong = errors.New("ccitt: run length too long")
errUnsupportedMode = errors.New("ccitt: unsupported mode")
errUnsupportedSubFormat = errors.New("ccitt: unsupported sub-format")
errUnsupportedWidth = errors.New("ccitt: unsupported width")
)
// Order specifies the bit ordering in a CCITT data stream.
type Order uint32
const (
// LSB means Least Significant Bits first.
LSB Order = iota
// MSB means Most Significant Bits first.
MSB
)
// SubFormat represents that the CCITT format consists of a number of
// sub-formats. Decoding or encoding a CCITT data stream requires knowing the
// sub-format context. It is not represented in the data stream per se.
type SubFormat uint32
const (
Group3 SubFormat = iota
Group4
)
// Options are optional parameters.
type Options struct {
// Align means that some variable-bit-width codes are byte-aligned.
Align bool
// Invert means that black is the 1 bit or 0xFF byte, and white is 0.
Invert bool
}
// maxWidth is the maximum (inclusive) supported width. This is a limitation of
// this implementation, to guard against integer overflow, and not anything
// inherent to the CCITT format.
const maxWidth = 1 << 20
func invertBytes(b []byte) {
for i, c := range b {
b[i] = ^c
}
}
func reverseBitsWithinBytes(b []byte) {
for i, c := range b {
b[i] = bits.Reverse8(c)
}
}
// highBits writes to dst (1 bit per pixel, most significant bit first) the
// high (0x80) bits from src (1 byte per pixel). It returns the number of bytes
// written and read such that dst[:d] is the packed form of src[:s].
//
// For example, if src starts with the 8 bytes [0x7D, 0x7E, 0x7F, 0x80, 0x81,
// 0x82, 0x00, 0xFF] then 0x1D will be written to dst[0].
//
// If src has (8 * len(dst)) or more bytes then only len(dst) bytes are
// written, (8 * len(dst)) bytes are read, and invert is ignored.
//
// Otherwise, if len(src) is not a multiple of 8 then the final byte written to
// dst is padded with 1 bits (if invert is true) or 0 bits. If inverted, the 1s
// are typically temporary, e.g. they will be flipped back to 0s by an
// invertBytes call in the highBits caller, reader.Read.
func highBits(dst []byte, src []byte, invert bool) (d int, s int) {
// Pack as many complete groups of 8 src bytes as we can.
n := len(src) / 8
if n > len(dst) {
n = len(dst)
}
dstN := dst[:n]
for i := range dstN {
src8 := src[i*8 : i*8+8]
dstN[i] = ((src8[0] & 0x80) >> 0) |
((src8[1] & 0x80) >> 1) |
((src8[2] & 0x80) >> 2) |
((src8[3] & 0x80) >> 3) |
((src8[4] & 0x80) >> 4) |
((src8[5] & 0x80) >> 5) |
((src8[6] & 0x80) >> 6) |
((src8[7] & 0x80) >> 7)
}
d, s = n, 8*n
dst, src = dst[d:], src[s:]
// Pack up to 7 remaining src bytes, if there's room in dst.
if (len(dst) > 0) && (len(src) > 0) {
dstByte := byte(0)
if invert {
dstByte = 0xFF >> uint(len(src))
}
for n, srcByte := range src {
dstByte |= (srcByte & 0x80) >> uint(n)
}
dst[0] = dstByte
d, s = d+1, s+len(src)
}
return d, s
}
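// Worked illustration of the highBits doc comment above (an editor-added
// example, not part of this file): packing one byte of output from the eight
// example src bytes.
//
//	var dst [1]byte
//	src := []byte{0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x00, 0xFF}
//	d, s := highBits(dst[:], src, false)
//	// d == 1, s == 8, and dst[0] == 0x1D (binary 00011101: only the bytes
//	// with their 0x80 bit set contribute a 1, most significant bit first).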
type bitReader struct {
r io.Reader
// readErr is the error returned from the most recent r.Read call. As the
// io.Reader documentation says, when r.Read returns (n, err), "always
// process the n > 0 bytes returned before considering the error err".
readErr error
// order is whether to process r's bytes LSB first or MSB first.
order Order
// The high nBits bits of the bits field hold upcoming bits in MSB order.
bits uint64
nBits uint32
// bytes[br:bw] holds bytes read from r but not yet loaded into bits.
br uint32
bw uint32
bytes [1024]uint8
}
func (b *bitReader) alignToByteBoundary() {
n := b.nBits & 7
b.bits <<= n
b.nBits -= n
}
// nextBitMaxNBits is the maximum possible value of bitReader.nBits after a
// bitReader.nextBit call, provided that bitReader.nBits was not more than this
// value before that call.
//
// Note that the decode function can unread bits, which can temporarily set the
// bitReader.nBits value above nextBitMaxNBits.
const nextBitMaxNBits = 31
func (b *bitReader) nextBit() (uint64, error) {
for {
if b.nBits > 0 {
bit := b.bits >> 63
b.bits <<= 1
b.nBits--
return bit, nil
}
if available := b.bw - b.br; available >= 4 {
// Read 32 bits, even though b.bits is a uint64, since the decode
// function may need to unread up to maxCodeLength bits, putting
// them back in the remaining (64 - 32) bits. TestMaxCodeLength
// checks that the generated maxCodeLength constant fits.
//
// If changing the Uint32 call, also change nextBitMaxNBits.
b.bits = uint64(binary.BigEndian.Uint32(b.bytes[b.br:])) << 32
b.br += 4
b.nBits = 32
continue
} else if available > 0 {
b.bits = uint64(b.bytes[b.br]) << (7 * 8)
b.br++
b.nBits = 8
continue
}
if b.readErr != nil {
return 0, b.readErr
}
n, err := b.r.Read(b.bytes[:])
b.br = 0
b.bw = uint32(n)
b.readErr = err
if b.order != MSB {
reverseBitsWithinBytes(b.bytes[:b.bw])
}
}
}
func decode(b *bitReader, decodeTable [][2]int16) (uint32, error) {
nBitsRead, bitsRead, state := uint32(0), uint64(0), int32(1)
for {
bit, err := b.nextBit()
if err != nil {
return 0, err
}
bitsRead |= bit << (63 - nBitsRead)
nBitsRead++
// The "&1" is redundant, but can eliminate a bounds check.
state = int32(decodeTable[state][bit&1])
if state < 0 {
return uint32(^state), nil
} else if state == 0 {
// Unread the bits we've read, then return errInvalidCode.
b.bits = (b.bits >> nBitsRead) | bitsRead
b.nBits += nBitsRead
return 0, errInvalidCode
}
}
}
type reader struct {
br bitReader
subFormat SubFormat
// width is the image width in pixels.
width int
// rowsRemaining starts at the image height in pixels, when the reader is
// driven through the io.Reader interface, and decrements to zero as rows
// are decoded. When driven through DecodeIntoGray, this field is unused.
rowsRemaining int
// curr and prev hold the current and previous rows. Each element is either
// 0x00 (black) or 0xFF (white).
//
// prev may be nil, when processing the first row.
curr []byte
prev []byte
// ri is the read index. curr[:ri] are those bytes of curr that have been
// passed along via the Read method.
//
// When the reader is driven through DecodeIntoGray, instead of through the
// io.Reader interface, this field is unused.
ri int
// wi is the write index. curr[:wi] are those bytes of curr that have
// already been decoded via the decodeRow method.
//
// What this implementation calls wi is roughly equivalent to what the spec
// calls the a0 index.
wi int
// These fields are copied from the *Options (which may be nil).
align bool
invert bool
// atStartOfRow is whether we have just started the row. Some parts of the
// spec say to treat this situation as if "wi = -1".
atStartOfRow bool
// penColorIsWhite is whether the next run is black or white.
penColorIsWhite bool
// seenStartOfImage is whether we've called the startDecode method.
seenStartOfImage bool
// readErr is a sticky error for the Read method.
readErr error
}
func (z *reader) Read(p []byte) (int, error) {
if z.readErr != nil {
return 0, z.readErr
}
originalP := p
for len(p) > 0 {
// Allocate buffers (and decode any start-of-image codes), if
// processing the first or second row.
if z.curr == nil {
if !z.seenStartOfImage {
if z.readErr = z.startDecode(); z.readErr != nil {
break
}
z.atStartOfRow = true
}
z.curr = make([]byte, z.width)
}
// Decode the next row, if necessary.
if z.atStartOfRow {
if z.rowsRemaining <= 0 {
if z.readErr = z.finishDecode(); z.readErr != nil {
break
}
z.readErr = io.EOF
break
}
if z.readErr = z.decodeRow(); z.readErr != nil {
break
}
z.rowsRemaining--
}
// Pack from z.curr (1 byte per pixel) to p (1 bit per pixel).
packD, packS := highBits(p, z.curr[z.ri:], z.invert)
p = p[packD:]
z.ri += packS
// Prepare to decode the next row, if necessary.
if z.ri == len(z.curr) {
z.ri, z.curr, z.prev = 0, z.prev, z.curr
z.atStartOfRow = true
}
}
n := len(originalP) - len(p)
if z.invert {
invertBytes(originalP[:n])
}
return n, z.readErr
}
func (z *reader) penColor() byte {
if z.penColorIsWhite {
return 0xFF
}
return 0x00
}
func (z *reader) startDecode() error {
switch z.subFormat {
case Group3:
if err := z.decodeEOL(); err != nil {
return err
}
case Group4:
// No-op.
default:
return errUnsupportedSubFormat
}
z.seenStartOfImage = true
return nil
}
func (z *reader) finishDecode() error {
numberOfEOLs := 0
switch z.subFormat {
case Group3:
// The stream ends with a RTC (Return To Control) of 6 consecutive
// EOL's, but we should have already just seen an EOL, either in
// z.startDecode (for a zero-height image) or in z.decodeRow.
numberOfEOLs = 5
case Group4:
// The stream ends with two EOL's, the first of which is possibly
// byte-aligned.
numberOfEOLs = 2
if err := z.decodeEOL(); err == nil {
numberOfEOLs--
} else if err == errInvalidCode {
// Try again, this time starting from a byte boundary.
z.br.alignToByteBoundary()
} else {
return err
}
default:
return errUnsupportedSubFormat
}
for ; numberOfEOLs > 0; numberOfEOLs-- {
if err := z.decodeEOL(); err != nil {
return err
}
}
return nil
}
func (z *reader) decodeEOL() error {
// TODO: EOL doesn't have to be in the modeDecodeTable. It could be in its
// own table, or we could just hard-code it, especially if we might need to
// cater for optional byte-alignment, or an arbitrary number (potentially
// more than 8) of 0-valued padding bits.
if mode, err := decode(&z.br, modeDecodeTable[:]); err != nil {
return err
} else if mode != modeEOL {
return errMissingEOL
}
return nil
}
func (z *reader) decodeRow() error {
z.wi = 0
z.atStartOfRow = true
z.penColorIsWhite = true
if z.align {
z.br.alignToByteBoundary()
}
switch z.subFormat {
case Group3:
for ; z.wi < len(z.curr); z.atStartOfRow = false {
if err := z.decodeRun(); err != nil {
return err
}
}
return z.decodeEOL()
case Group4:
for ; z.wi < len(z.curr); z.atStartOfRow = false {
mode, err := decode(&z.br, modeDecodeTable[:])
if err != nil {
return err
}
rm := readerMode{}
if mode < uint32(len(readerModes)) {
rm = readerModes[mode]
}
if rm.function == nil {
return errInvalidMode
}
if err := rm.function(z, rm.arg); err != nil {
return err
}
}
return nil
}
return errUnsupportedSubFormat
}
func (z *reader) decodeRun() error {
table := blackDecodeTable[:]
if z.penColorIsWhite {
table = whiteDecodeTable[:]
}
total := 0
for {
n, err := decode(&z.br, table)
if err != nil {
return err
}
if n > maxWidth {
panic("unreachable")
}
total += int(n)
if total > maxWidth {
return errRunLengthTooLong
}
// Anything 0x3F or below is a terminal code.
if n <= 0x3F {
break
}
}
if total > (len(z.curr) - z.wi) {
return errRunLengthOverflowsWidth
}
dst := z.curr[z.wi : z.wi+total]
penColor := z.penColor()
for i := range dst {
dst[i] = penColor
}
z.wi += total
z.penColorIsWhite = !z.penColorIsWhite
return nil
}
// The various modes' semantics are based on determining a row of pixels'
// "changing elements": those pixels whose color differs from the one on its
// immediate left.
//
// The row above the first row is implicitly all white. Similarly, the column
// to the left of the first column is implicitly all white.
//
// For example, here's Figure 1 in "ITU-T Recommendation T.6", where the
// current and previous rows contain black (B) and white (w) pixels. The a?
// indexes point into curr, the b? indexes point into prev.
//
// b1 b2
// v v
// prev: BBBBBwwwwwBBBwwwww
// curr: BBBwwwwwBBBBBBwwww
// ^ ^ ^
// a0 a1 a2
//
// a0 is the "reference element" or current decoder position, roughly
// equivalent to what this implementation calls reader.wi.
//
// a1 is the next changing element to the right of a0, on the "coding line"
// (the current row).
//
// a2 is the next changing element to the right of a1, again on curr.
//
// b1 is the first changing element on the "reference line" (the previous row)
// to the right of a0 and of opposite color to a0.
//
// b2 is the next changing element to the right of b1, again on prev.
//
// The various modes calculate a1 (and a2, for modeH):
// - modePass calculates that a1 is at or to the right of b2.
// - modeH calculates a1 and a2 without considering b1 or b2.
// - modeV* calculates a1 to be b1 plus an adjustment (between -3 and +3).
const (
findB1 = false
findB2 = true
)
// findB finds either the b1 or b2 value.
func (z *reader) findB(whichB bool) int {
// The initial row is a special case. The previous row is implicitly all
// white, so that there are no changing pixel elements. We return b1 or b2
// to be at the end of the row.
if len(z.prev) != len(z.curr) {
return len(z.curr)
}
i := z.wi
if z.atStartOfRow {
// a0 is implicitly at -1, on a white pixel. b1 is the first black
// pixel in the previous row. b2 is the first white pixel after that.
for ; (i < len(z.prev)) && (z.prev[i] == 0xFF); i++ {
}
if whichB == findB2 {
for ; (i < len(z.prev)) && (z.prev[i] == 0x00); i++ {
}
}
return i
}
// As per figure 1 above, assume that the current pen color is white.
// First, walk past every contiguous black pixel in prev, starting at a0.
oppositeColor := ^z.penColor()
for ; (i < len(z.prev)) && (z.prev[i] == oppositeColor); i++ {
}
// Then walk past every contiguous white pixel.
penColor := ^oppositeColor
for ; (i < len(z.prev)) && (z.prev[i] == penColor); i++ {
}
// We're now at a black pixel (or at the end of the row). That's b1.
if whichB == findB2 {
// If we're looking for b2, walk past every contiguous black pixel
// again.
oppositeColor := ^penColor
for ; (i < len(z.prev)) && (z.prev[i] == oppositeColor); i++ {
}
}
return i
}
type readerMode struct {
function func(z *reader, arg int) error
arg int
}
var readerModes = [...]readerMode{
modePass: {function: readerModePass},
modeH: {function: readerModeH},
modeV0: {function: readerModeV, arg: +0},
modeVR1: {function: readerModeV, arg: +1},
modeVR2: {function: readerModeV, arg: +2},
modeVR3: {function: readerModeV, arg: +3},
modeVL1: {function: readerModeV, arg: -1},
modeVL2: {function: readerModeV, arg: -2},
modeVL3: {function: readerModeV, arg: -3},
modeExt: {function: readerModeExt},
}
func readerModePass(z *reader, arg int) error {
b2 := z.findB(findB2)
if (b2 < z.wi) || (len(z.curr) < b2) {
return errInvalidOffset
}
dst := z.curr[z.wi:b2]
penColor := z.penColor()
for i := range dst {
dst[i] = penColor
}
z.wi = b2
return nil
}
func readerModeH(z *reader, arg int) error {
// The first iteration finds a1. The second finds a2.
for i := 0; i < 2; i++ {
if err := z.decodeRun(); err != nil {
return err
}
}
return nil
}
func readerModeV(z *reader, arg int) error {
a1 := z.findB(findB1) + arg
if (a1 < z.wi) || (len(z.curr) < a1) {
return errInvalidOffset
}
dst := z.curr[z.wi:a1]
penColor := z.penColor()
for i := range dst {
dst[i] = penColor
}
z.wi = a1
z.penColorIsWhite = !z.penColorIsWhite
return nil
}
func readerModeExt(z *reader, arg int) error {
return errUnsupportedMode
}
// DecodeIntoGray decodes the CCITT-formatted data in r into dst.
//
// It returns an error if dst's width and height don't match the implied width
// and height of the CCITT-formatted data.
func DecodeIntoGray(dst *image.Gray, r io.Reader, order Order, sf SubFormat, opts *Options) error {
bounds := dst.Bounds()
if (bounds.Dx() < 0) || (bounds.Dy() < 0) {
return errInvalidBounds
}
if bounds.Dx() > maxWidth {
return errUnsupportedWidth
}
z := reader{
br: bitReader{r: r, order: order},
subFormat: sf,
align: (opts != nil) && opts.Align,
invert: (opts != nil) && opts.Invert,
width: bounds.Dx(),
}
if err := z.startDecode(); err != nil {
return err
}
width := bounds.Dx()
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
p := (y - bounds.Min.Y) * dst.Stride
z.curr = dst.Pix[p : p+width]
if err := z.decodeRow(); err != nil {
return err
}
z.curr, z.prev = nil, z.curr
}
if err := z.finishDecode(); err != nil {
return err
}
if z.invert {
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
p := (y - bounds.Min.Y) * dst.Stride
invertBytes(dst.Pix[p : p+width])
}
}
return nil
}
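As a usage illustration only (not part of the vendored file), here is a minimal sketch of calling DecodeIntoGray as declared above. The file name "fax.g4", the Group 4 sub-format and the 1728x1100 dimensions are assumptions for the sketch, not values taken from this commit.
package main
import (
	"image"
	"log"
	"os"
	"golang.org/x/image/ccitt"
)
func main() {
	f, err := os.Open("fax.g4") // hypothetical Group 4 fax data
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	// Per the doc comment above, dst's bounds must match the width and height
	// implied by the CCITT-formatted data.
	dst := image.NewGray(image.Rect(0, 0, 1728, 1100))
	if err := ccitt.DecodeIntoGray(dst, f, ccitt.MSB, ccitt.Group4, nil); err != nil {
		log.Fatal(err)
	}
}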
// NewReader returns an io.Reader that decodes the CCITT-formatted data in r.
// The resultant byte stream is one bit per pixel (MSB first), with 1 meaning
// white and 0 meaning black. Each row in the result is byte-aligned.
func NewReader(r io.Reader, order Order, sf SubFormat, width int, height int, opts *Options) io.Reader {
readErr := error(nil)
if (width < 0) || (height < 0) {
readErr = errInvalidBounds
} else if width > maxWidth {
readErr = errUnsupportedWidth
}
return &reader{
br: bitReader{r: r, order: order},
subFormat: sf,
align: (opts != nil) && opts.Align,
invert: (opts != nil) && opts.Invert,
width: width,
rowsRemaining: height,
readErr: readErr,
}
}
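And the same data driven through the io.Reader interface instead, again only a hedged sketch: it reuses the hypothetical f from the sketch above and assumes "io" is also imported.
	width, height := 1728, 1100 // hypothetical dimensions
	r := ccitt.NewReader(f, ccitt.MSB, ccitt.Group3, width, height, nil)
	packed, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	// Each row is byte-aligned and 1 bit per pixel (1 meaning white), so on
	// success len(packed) should be ((width+7)/8)*height bytes.
	_ = packed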

989
vendor/golang.org/x/image/ccitt/table.go generated vendored Normal file
View File

@ -0,0 +1,989 @@
// generated by "go run gen.go". DO NOT EDIT.
package ccitt
// Each decodeTable is represented by an array of [2]int16's: a binary tree.
// Each array element (other than element 0, which means invalid) is a branch
// node in that tree. The root node is always element 1 (the second element).
//
// To walk the tree, look at the next bit in the bit stream, using it to select
// the first or second element of the [2]int16. If that int16 is 0, we have an
// invalid code. If it is positive, go to that branch node. If it is negative,
// then we have a leaf node, whose value is the bitwise complement (the ^
// operator) of that int16.
//
// Comments above each decodeTable also show the same structure visually. The
// "b123" lines show the 123'rd branch node. The "=XXXXX" lines show an invalid
// code. The "=v1234" lines show a leaf node with value 1234. When reading the
// bit stream, a 0 or 1 bit means to go up or down, as you move left to right.
//
// For example, in modeDecodeTable, branch node b005 is three steps up from the
// root node, meaning that we have already seen "000". If the next bit is "0"
// then we move to branch node b006. Otherwise, the next bit is "1", and we
// move to the leaf node v0000 (also known as the modePass constant). Indeed,
// the bits that encode modePass are "0001".
//
// Tables 1, 2 and 3 come from the "ITU-T Recommendation T.6: FACSIMILE CODING
// SCHEMES AND CODING CONTROL FUNCTIONS FOR GROUP 4 FACSIMILE APPARATUS"
// specification:
//
// https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-T.6-198811-I!!PDF-E&type=items
// modeDecodeTable represents Table 1 and the End-of-Line code.
//
// +=XXXXX
// b015 +-+
// | +=v0010
// b014 +-+
// | +=XXXXX
// b013 +-+
// | +=XXXXX
// b012 +-+
// | +=XXXXX
// b011 +-+
// | +=XXXXX
// b009 +-+
// | +=v0009
// b007 +-+
// | | +=v0008
// b010 | +-+
// | +=v0005
// b006 +-+
// | | +=v0007
// b008 | +-+
// | +=v0004
// b005 +-+
// | +=v0000
// b003 +-+
// | +=v0001
// b002 +-+
// | | +=v0006
// b004 | +-+
// | +=v0003
// b001 +-+
// +=v0002
var modeDecodeTable = [...][2]int16{
0: {0, 0},
1: {2, ^2},
2: {3, 4},
3: {5, ^1},
4: {^6, ^3},
5: {6, ^0},
6: {7, 8},
7: {9, 10},
8: {^7, ^4},
9: {11, ^9},
10: {^8, ^5},
11: {12, 0},
12: {13, 0},
13: {14, 0},
14: {15, 0},
15: {0, ^10},
}
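To make the tree-walking description above concrete, here is a small standalone sketch (not part of the vendored file; the names modeTable and walk are made up) that follows the bits "0001" through a verbatim copy of modeDecodeTable and arrives at the modePass leaf, exactly as the comment's worked example describes.
package main
import "fmt"
// modeTable is a verbatim copy of modeDecodeTable above: element 0 is
// "invalid" and element 1 is the root of the binary tree.
var modeTable = [][2]int16{
	{0, 0}, {2, ^2}, {3, 4}, {5, ^1}, {^6, ^3}, {6, ^0}, {7, 8}, {9, 10},
	{^7, ^4}, {11, ^9}, {^8, ^5}, {12, 0}, {13, 0}, {14, 0}, {15, 0}, {0, ^10},
}
// walk follows the tree for the given bits (each 0 or 1). It returns the
// decoded value and true, or 0 and false for an invalid or incomplete code.
func walk(table [][2]int16, bits []int) (int16, bool) {
	state := int16(1) // the root node is always element 1
	for _, bit := range bits {
		state = table[state][bit]
		if state < 0 {
			return ^state, true // a leaf: the value is the bitwise complement
		}
		if state == 0 {
			return 0, false // invalid code
		}
	}
	return 0, false // ran out of bits before reaching a leaf
}
func main() {
	// "0001" walks b001 -> b002 -> b003 -> b005 and lands on the v0000 leaf,
	// i.e. modePass, matching the comment above.
	fmt.Println(walk(modeTable, []int{0, 0, 0, 1})) // prints: 0 true
}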
// whiteDecodeTable represents Tables 2 and 3 for a white run.
//
// +=XXXXX
// b059 +-+
// | | +=v1792
// b096 | | +-+
// | | | | +=v1984
// b100 | | | +-+
// | | | +=v2048
// b094 | | +-+
// | | | | +=v2112
// b101 | | | | +-+
// | | | | | +=v2176
// b097 | | | +-+
// | | | | +=v2240
// b102 | | | +-+
// | | | +=v2304
// b085 | +-+
// | | +=v1856
// b098 | | +-+
// | | | +=v1920
// b095 | +-+
// | | +=v2368
// b103 | | +-+
// | | | +=v2432
// b099 | +-+
// | | +=v2496
// b104 | +-+
// | +=v2560
// b040 +-+
// | | +=v0029
// b060 | +-+
// | +=v0030
// b026 +-+
// | | +=v0045
// b061 | | +-+
// | | | +=v0046
// b041 | +-+
// | +=v0022
// b016 +-+
// | | +=v0023
// b042 | | +-+
// | | | | +=v0047
// b062 | | | +-+
// | | | +=v0048
// b027 | +-+
// | +=v0013
// b008 +-+
// | | +=v0020
// b043 | | +-+
// | | | | +=v0033
// b063 | | | +-+
// | | | +=v0034
// b028 | | +-+
// | | | | +=v0035
// b064 | | | | +-+
// | | | | | +=v0036
// b044 | | | +-+
// | | | | +=v0037
// b065 | | | +-+
// | | | +=v0038
// b017 | +-+
// | | +=v0019
// b045 | | +-+
// | | | | +=v0031
// b066 | | | +-+
// | | | +=v0032
// b029 | +-+
// | +=v0001
// b004 +-+
// | | +=v0012
// b030 | | +-+
// | | | | +=v0053
// b067 | | | | +-+
// | | | | | +=v0054
// b046 | | | +-+
// | | | +=v0026
// b018 | | +-+
// | | | | +=v0039
// b068 | | | | +-+
// | | | | | +=v0040
// b047 | | | | +-+
// | | | | | | +=v0041
// b069 | | | | | +-+
// | | | | | +=v0042
// b031 | | | +-+
// | | | | +=v0043
// b070 | | | | +-+
// | | | | | +=v0044
// b048 | | | +-+
// | | | +=v0021
// b009 | +-+
// | | +=v0028
// b049 | | +-+
// | | | | +=v0061
// b071 | | | +-+
// | | | +=v0062
// b032 | | +-+
// | | | | +=v0063
// b072 | | | | +-+
// | | | | | +=v0000
// b050 | | | +-+
// | | | | +=v0320
// b073 | | | +-+
// | | | +=v0384
// b019 | +-+
// | +=v0010
// b002 +-+
// | | +=v0011
// b020 | | +-+
// | | | | +=v0027
// b051 | | | | +-+
// | | | | | | +=v0059
// b074 | | | | | +-+
// | | | | | +=v0060
// b033 | | | +-+
// | | | | +=v1472
// b086 | | | | +-+
// | | | | | +=v1536
// b075 | | | | +-+
// | | | | | | +=v1600
// b087 | | | | | +-+
// | | | | | +=v1728
// b052 | | | +-+
// | | | +=v0018
// b010 | | +-+
// | | | | +=v0024
// b053 | | | | +-+
// | | | | | | +=v0049
// b076 | | | | | +-+
// | | | | | +=v0050
// b034 | | | | +-+
// | | | | | | +=v0051
// b077 | | | | | | +-+
// | | | | | | | +=v0052
// b054 | | | | | +-+
// | | | | | +=v0025
// b021 | | | +-+
// | | | | +=v0055
// b078 | | | | +-+
// | | | | | +=v0056
// b055 | | | | +-+
// | | | | | | +=v0057
// b079 | | | | | +-+
// | | | | | +=v0058
// b035 | | | +-+
// | | | +=v0192
// b005 | +-+
// | | +=v1664
// b036 | | +-+
// | | | | +=v0448
// b080 | | | | +-+
// | | | | | +=v0512
// b056 | | | +-+
// | | | | +=v0704
// b088 | | | | +-+
// | | | | | +=v0768
// b081 | | | +-+
// | | | +=v0640
// b022 | | +-+
// | | | | +=v0576
// b082 | | | | +-+
// | | | | | | +=v0832
// b089 | | | | | +-+
// | | | | | +=v0896
// b057 | | | | +-+
// | | | | | | +=v0960
// b090 | | | | | | +-+
// | | | | | | | +=v1024
// b083 | | | | | +-+
// | | | | | | +=v1088
// b091 | | | | | +-+
// | | | | | +=v1152
// b037 | | | +-+
// | | | | +=v1216
// b092 | | | | +-+
// | | | | | +=v1280
// b084 | | | | +-+
// | | | | | | +=v1344
// b093 | | | | | +-+
// | | | | | +=v1408
// b058 | | | +-+
// | | | +=v0256
// b011 | +-+
// | +=v0002
// b001 +-+
// | +=v0003
// b012 | +-+
// | | | +=v0128
// b023 | | +-+
// | | +=v0008
// b006 | +-+
// | | | +=v0009
// b024 | | | +-+
// | | | | | +=v0016
// b038 | | | | +-+
// | | | | +=v0017
// b013 | | +-+
// | | +=v0004
// b003 +-+
// | +=v0005
// b014 | +-+
// | | | +=v0014
// b039 | | | +-+
// | | | | +=v0015
// b025 | | +-+
// | | +=v0064
// b007 +-+
// | +=v0006
// b015 +-+
// +=v0007
var whiteDecodeTable = [...][2]int16{
0: {0, 0},
1: {2, 3},
2: {4, 5},
3: {6, 7},
4: {8, 9},
5: {10, 11},
6: {12, 13},
7: {14, 15},
8: {16, 17},
9: {18, 19},
10: {20, 21},
11: {22, ^2},
12: {^3, 23},
13: {24, ^4},
14: {^5, 25},
15: {^6, ^7},
16: {26, 27},
17: {28, 29},
18: {30, 31},
19: {32, ^10},
20: {^11, 33},
21: {34, 35},
22: {36, 37},
23: {^128, ^8},
24: {^9, 38},
25: {39, ^64},
26: {40, 41},
27: {42, ^13},
28: {43, 44},
29: {45, ^1},
30: {^12, 46},
31: {47, 48},
32: {49, 50},
33: {51, 52},
34: {53, 54},
35: {55, ^192},
36: {^1664, 56},
37: {57, 58},
38: {^16, ^17},
39: {^14, ^15},
40: {59, 60},
41: {61, ^22},
42: {^23, 62},
43: {^20, 63},
44: {64, 65},
45: {^19, 66},
46: {67, ^26},
47: {68, 69},
48: {70, ^21},
49: {^28, 71},
50: {72, 73},
51: {^27, 74},
52: {75, ^18},
53: {^24, 76},
54: {77, ^25},
55: {78, 79},
56: {80, 81},
57: {82, 83},
58: {84, ^256},
59: {0, 85},
60: {^29, ^30},
61: {^45, ^46},
62: {^47, ^48},
63: {^33, ^34},
64: {^35, ^36},
65: {^37, ^38},
66: {^31, ^32},
67: {^53, ^54},
68: {^39, ^40},
69: {^41, ^42},
70: {^43, ^44},
71: {^61, ^62},
72: {^63, ^0},
73: {^320, ^384},
74: {^59, ^60},
75: {86, 87},
76: {^49, ^50},
77: {^51, ^52},
78: {^55, ^56},
79: {^57, ^58},
80: {^448, ^512},
81: {88, ^640},
82: {^576, 89},
83: {90, 91},
84: {92, 93},
85: {94, 95},
86: {^1472, ^1536},
87: {^1600, ^1728},
88: {^704, ^768},
89: {^832, ^896},
90: {^960, ^1024},
91: {^1088, ^1152},
92: {^1216, ^1280},
93: {^1344, ^1408},
94: {96, 97},
95: {98, 99},
96: {^1792, 100},
97: {101, 102},
98: {^1856, ^1920},
99: {103, 104},
100: {^1984, ^2048},
101: {^2112, ^2176},
102: {^2240, ^2304},
103: {^2368, ^2432},
104: {^2496, ^2560},
}
// blackDecodeTable represents Tables 2 and 3 for a black run.
//
// +=XXXXX
// b017 +-+
// | | +=v1792
// b042 | | +-+
// | | | | +=v1984
// b063 | | | +-+
// | | | +=v2048
// b029 | | +-+
// | | | | +=v2112
// b064 | | | | +-+
// | | | | | +=v2176
// b043 | | | +-+
// | | | | +=v2240
// b065 | | | +-+
// | | | +=v2304
// b022 | +-+
// | | +=v1856
// b044 | | +-+
// | | | +=v1920
// b030 | +-+
// | | +=v2368
// b066 | | +-+
// | | | +=v2432
// b045 | +-+
// | | +=v2496
// b067 | +-+
// | +=v2560
// b013 +-+
// | | +=v0018
// b031 | | +-+
// | | | | +=v0052
// b068 | | | | +-+
// | | | | | | +=v0640
// b095 | | | | | +-+
// | | | | | +=v0704
// b046 | | | +-+
// | | | | +=v0768
// b096 | | | | +-+
// | | | | | +=v0832
// b069 | | | +-+
// | | | +=v0055
// b023 | | +-+
// | | | | +=v0056
// b070 | | | | +-+
// | | | | | | +=v1280
// b097 | | | | | +-+
// | | | | | +=v1344
// b047 | | | | +-+
// | | | | | | +=v1408
// b098 | | | | | | +-+
// | | | | | | | +=v1472
// b071 | | | | | +-+
// | | | | | +=v0059
// b032 | | | +-+
// | | | | +=v0060
// b072 | | | | +-+
// | | | | | | +=v1536
// b099 | | | | | +-+
// | | | | | +=v1600
// b048 | | | +-+
// | | | +=v0024
// b018 | +-+
// | | +=v0025
// b049 | | +-+
// | | | | +=v1664
// b100 | | | | +-+
// | | | | | +=v1728
// b073 | | | +-+
// | | | +=v0320
// b033 | | +-+
// | | | | +=v0384
// b074 | | | | +-+
// | | | | | +=v0448
// b050 | | | +-+
// | | | | +=v0512
// b101 | | | | +-+
// | | | | | +=v0576
// b075 | | | +-+
// | | | +=v0053
// b024 | +-+
// | | +=v0054
// b076 | | +-+
// | | | | +=v0896
// b102 | | | +-+
// | | | +=v0960
// b051 | | +-+
// | | | | +=v1024
// b103 | | | | +-+
// | | | | | +=v1088
// b077 | | | +-+
// | | | | +=v1152
// b104 | | | +-+
// | | | +=v1216
// b034 | +-+
// | +=v0064
// b010 +-+
// | | +=v0013
// b019 | | +-+
// | | | | +=v0023
// b052 | | | | +-+
// | | | | | | +=v0050
// b078 | | | | | +-+
// | | | | | +=v0051
// b035 | | | | +-+
// | | | | | | +=v0044
// b079 | | | | | | +-+
// | | | | | | | +=v0045
// b053 | | | | | +-+
// | | | | | | +=v0046
// b080 | | | | | +-+
// | | | | | +=v0047
// b025 | | | +-+
// | | | | +=v0057
// b081 | | | | +-+
// | | | | | +=v0058
// b054 | | | | +-+
// | | | | | | +=v0061
// b082 | | | | | +-+
// | | | | | +=v0256
// b036 | | | +-+
// | | | +=v0016
// b014 | +-+
// | | +=v0017
// b037 | | +-+
// | | | | +=v0048
// b083 | | | | +-+
// | | | | | +=v0049
// b055 | | | +-+
// | | | | +=v0062
// b084 | | | +-+
// | | | +=v0063
// b026 | | +-+
// | | | | +=v0030
// b085 | | | | +-+
// | | | | | +=v0031
// b056 | | | | +-+
// | | | | | | +=v0032
// b086 | | | | | +-+
// | | | | | +=v0033
// b038 | | | +-+
// | | | | +=v0040
// b087 | | | | +-+
// | | | | | +=v0041
// b057 | | | +-+
// | | | +=v0022
// b020 | +-+
// | +=v0014
// b008 +-+
// | | +=v0010
// b015 | | +-+
// | | | +=v0011
// b011 | +-+
// | | +=v0015
// b027 | | +-+
// | | | | +=v0128
// b088 | | | | +-+
// | | | | | +=v0192
// b058 | | | | +-+
// | | | | | | +=v0026
// b089 | | | | | +-+
// | | | | | +=v0027
// b039 | | | +-+
// | | | | +=v0028
// b090 | | | | +-+
// | | | | | +=v0029
// b059 | | | +-+
// | | | +=v0019
// b021 | | +-+
// | | | | +=v0020
// b060 | | | | +-+
// | | | | | | +=v0034
// b091 | | | | | +-+
// | | | | | +=v0035
// b040 | | | | +-+
// | | | | | | +=v0036
// b092 | | | | | | +-+
// | | | | | | | +=v0037
// b061 | | | | | +-+
// | | | | | | +=v0038
// b093 | | | | | +-+
// | | | | | +=v0039
// b028 | | | +-+
// | | | | +=v0021
// b062 | | | | +-+
// | | | | | | +=v0042
// b094 | | | | | +-+
// | | | | | +=v0043
// b041 | | | +-+
// | | | +=v0000
// b016 | +-+
// | +=v0012
// b006 +-+
// | | +=v0009
// b012 | | +-+
// | | | +=v0008
// b009 | +-+
// | +=v0007
// b004 +-+
// | | +=v0006
// b007 | +-+
// | +=v0005
// b002 +-+
// | | +=v0001
// b005 | +-+
// | +=v0004
// b001 +-+
// | +=v0003
// b003 +-+
// +=v0002
var blackDecodeTable = [...][2]int16{
0: {0, 0},
1: {2, 3},
2: {4, 5},
3: {^3, ^2},
4: {6, 7},
5: {^1, ^4},
6: {8, 9},
7: {^6, ^5},
8: {10, 11},
9: {12, ^7},
10: {13, 14},
11: {15, 16},
12: {^9, ^8},
13: {17, 18},
14: {19, 20},
15: {^10, ^11},
16: {21, ^12},
17: {0, 22},
18: {23, 24},
19: {^13, 25},
20: {26, ^14},
21: {27, 28},
22: {29, 30},
23: {31, 32},
24: {33, 34},
25: {35, 36},
26: {37, 38},
27: {^15, 39},
28: {40, 41},
29: {42, 43},
30: {44, 45},
31: {^18, 46},
32: {47, 48},
33: {49, 50},
34: {51, ^64},
35: {52, 53},
36: {54, ^16},
37: {^17, 55},
38: {56, 57},
39: {58, 59},
40: {60, 61},
41: {62, ^0},
42: {^1792, 63},
43: {64, 65},
44: {^1856, ^1920},
45: {66, 67},
46: {68, 69},
47: {70, 71},
48: {72, ^24},
49: {^25, 73},
50: {74, 75},
51: {76, 77},
52: {^23, 78},
53: {79, 80},
54: {81, 82},
55: {83, 84},
56: {85, 86},
57: {87, ^22},
58: {88, 89},
59: {90, ^19},
60: {^20, 91},
61: {92, 93},
62: {^21, 94},
63: {^1984, ^2048},
64: {^2112, ^2176},
65: {^2240, ^2304},
66: {^2368, ^2432},
67: {^2496, ^2560},
68: {^52, 95},
69: {96, ^55},
70: {^56, 97},
71: {98, ^59},
72: {^60, 99},
73: {100, ^320},
74: {^384, ^448},
75: {101, ^53},
76: {^54, 102},
77: {103, 104},
78: {^50, ^51},
79: {^44, ^45},
80: {^46, ^47},
81: {^57, ^58},
82: {^61, ^256},
83: {^48, ^49},
84: {^62, ^63},
85: {^30, ^31},
86: {^32, ^33},
87: {^40, ^41},
88: {^128, ^192},
89: {^26, ^27},
90: {^28, ^29},
91: {^34, ^35},
92: {^36, ^37},
93: {^38, ^39},
94: {^42, ^43},
95: {^640, ^704},
96: {^768, ^832},
97: {^1280, ^1344},
98: {^1408, ^1472},
99: {^1536, ^1600},
100: {^1664, ^1728},
101: {^512, ^576},
102: {^896, ^960},
103: {^1024, ^1088},
104: {^1152, ^1216},
}
const maxCodeLength = 13
// Each encodeTable is represented by an array of bitStrings.
// bitString is a pair of uint32 values representing a bit code.
// The nBits low bits of bits make up the actual bit code.
// E.g. bitString{0x0004, 8} represents the bitcode "00000100".
type bitString struct {
bits uint32
nBits uint32
}
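As a quick illustration of that convention (not part of the vendored file), a helper that renders the low nBits bits most-significant-bit first, reproducing the quoted strings in the tables below. The function name is made up for this sketch, and it would have to sit alongside table.go in package ccitt since it uses the unexported bitString type.
// formatBitString renders bs.bits's low bs.nBits bits, MSB first.
// For example, formatBitString(bitString{0x0004, 8}) returns "00000100".
func formatBitString(bs bitString) string {
	buf := make([]byte, bs.nBits)
	for i := uint32(0); i < bs.nBits; i++ {
		buf[i] = '0' + byte((bs.bits>>(bs.nBits-1-i))&1)
	}
	return string(buf)
}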
// modeEncodeTable represents Table 1 and the End-of-Line code.
var modeEncodeTable = [...]bitString{
0: {0x0001, 4}, // "0001"
1: {0x0001, 3}, // "001"
2: {0x0001, 1}, // "1"
3: {0x0003, 3}, // "011"
4: {0x0003, 6}, // "000011"
5: {0x0003, 7}, // "0000011"
6: {0x0002, 3}, // "010"
7: {0x0002, 6}, // "000010"
8: {0x0002, 7}, // "0000010"
9: {0x0001, 7}, // "0000001"
10: {0x0001, 12}, // "000000000001"
}
// whiteEncodeTable2 represents Table 2 for a white run.
var whiteEncodeTable2 = [...]bitString{
0: {0x0035, 8}, // "00110101"
1: {0x0007, 6}, // "000111"
2: {0x0007, 4}, // "0111"
3: {0x0008, 4}, // "1000"
4: {0x000b, 4}, // "1011"
5: {0x000c, 4}, // "1100"
6: {0x000e, 4}, // "1110"
7: {0x000f, 4}, // "1111"
8: {0x0013, 5}, // "10011"
9: {0x0014, 5}, // "10100"
10: {0x0007, 5}, // "00111"
11: {0x0008, 5}, // "01000"
12: {0x0008, 6}, // "001000"
13: {0x0003, 6}, // "000011"
14: {0x0034, 6}, // "110100"
15: {0x0035, 6}, // "110101"
16: {0x002a, 6}, // "101010"
17: {0x002b, 6}, // "101011"
18: {0x0027, 7}, // "0100111"
19: {0x000c, 7}, // "0001100"
20: {0x0008, 7}, // "0001000"
21: {0x0017, 7}, // "0010111"
22: {0x0003, 7}, // "0000011"
23: {0x0004, 7}, // "0000100"
24: {0x0028, 7}, // "0101000"
25: {0x002b, 7}, // "0101011"
26: {0x0013, 7}, // "0010011"
27: {0x0024, 7}, // "0100100"
28: {0x0018, 7}, // "0011000"
29: {0x0002, 8}, // "00000010"
30: {0x0003, 8}, // "00000011"
31: {0x001a, 8}, // "00011010"
32: {0x001b, 8}, // "00011011"
33: {0x0012, 8}, // "00010010"
34: {0x0013, 8}, // "00010011"
35: {0x0014, 8}, // "00010100"
36: {0x0015, 8}, // "00010101"
37: {0x0016, 8}, // "00010110"
38: {0x0017, 8}, // "00010111"
39: {0x0028, 8}, // "00101000"
40: {0x0029, 8}, // "00101001"
41: {0x002a, 8}, // "00101010"
42: {0x002b, 8}, // "00101011"
43: {0x002c, 8}, // "00101100"
44: {0x002d, 8}, // "00101101"
45: {0x0004, 8}, // "00000100"
46: {0x0005, 8}, // "00000101"
47: {0x000a, 8}, // "00001010"
48: {0x000b, 8}, // "00001011"
49: {0x0052, 8}, // "01010010"
50: {0x0053, 8}, // "01010011"
51: {0x0054, 8}, // "01010100"
52: {0x0055, 8}, // "01010101"
53: {0x0024, 8}, // "00100100"
54: {0x0025, 8}, // "00100101"
55: {0x0058, 8}, // "01011000"
56: {0x0059, 8}, // "01011001"
57: {0x005a, 8}, // "01011010"
58: {0x005b, 8}, // "01011011"
59: {0x004a, 8}, // "01001010"
60: {0x004b, 8}, // "01001011"
61: {0x0032, 8}, // "00110010"
62: {0x0033, 8}, // "00110011"
63: {0x0034, 8}, // "00110100"
}
// whiteEncodeTable3 represents Table 3 for a white run.
var whiteEncodeTable3 = [...]bitString{
0: {0x001b, 5}, // "11011"
1: {0x0012, 5}, // "10010"
2: {0x0017, 6}, // "010111"
3: {0x0037, 7}, // "0110111"
4: {0x0036, 8}, // "00110110"
5: {0x0037, 8}, // "00110111"
6: {0x0064, 8}, // "01100100"
7: {0x0065, 8}, // "01100101"
8: {0x0068, 8}, // "01101000"
9: {0x0067, 8}, // "01100111"
10: {0x00cc, 9}, // "011001100"
11: {0x00cd, 9}, // "011001101"
12: {0x00d2, 9}, // "011010010"
13: {0x00d3, 9}, // "011010011"
14: {0x00d4, 9}, // "011010100"
15: {0x00d5, 9}, // "011010101"
16: {0x00d6, 9}, // "011010110"
17: {0x00d7, 9}, // "011010111"
18: {0x00d8, 9}, // "011011000"
19: {0x00d9, 9}, // "011011001"
20: {0x00da, 9}, // "011011010"
21: {0x00db, 9}, // "011011011"
22: {0x0098, 9}, // "010011000"
23: {0x0099, 9}, // "010011001"
24: {0x009a, 9}, // "010011010"
25: {0x0018, 6}, // "011000"
26: {0x009b, 9}, // "010011011"
27: {0x0008, 11}, // "00000001000"
28: {0x000c, 11}, // "00000001100"
29: {0x000d, 11}, // "00000001101"
30: {0x0012, 12}, // "000000010010"
31: {0x0013, 12}, // "000000010011"
32: {0x0014, 12}, // "000000010100"
33: {0x0015, 12}, // "000000010101"
34: {0x0016, 12}, // "000000010110"
35: {0x0017, 12}, // "000000010111"
36: {0x001c, 12}, // "000000011100"
37: {0x001d, 12}, // "000000011101"
38: {0x001e, 12}, // "000000011110"
39: {0x001f, 12}, // "000000011111"
}
// blackEncodeTable2 represents Table 2 for a black run.
var blackEncodeTable2 = [...]bitString{
0: {0x0037, 10}, // "0000110111"
1: {0x0002, 3}, // "010"
2: {0x0003, 2}, // "11"
3: {0x0002, 2}, // "10"
4: {0x0003, 3}, // "011"
5: {0x0003, 4}, // "0011"
6: {0x0002, 4}, // "0010"
7: {0x0003, 5}, // "00011"
8: {0x0005, 6}, // "000101"
9: {0x0004, 6}, // "000100"
10: {0x0004, 7}, // "0000100"
11: {0x0005, 7}, // "0000101"
12: {0x0007, 7}, // "0000111"
13: {0x0004, 8}, // "00000100"
14: {0x0007, 8}, // "00000111"
15: {0x0018, 9}, // "000011000"
16: {0x0017, 10}, // "0000010111"
17: {0x0018, 10}, // "0000011000"
18: {0x0008, 10}, // "0000001000"
19: {0x0067, 11}, // "00001100111"
20: {0x0068, 11}, // "00001101000"
21: {0x006c, 11}, // "00001101100"
22: {0x0037, 11}, // "00000110111"
23: {0x0028, 11}, // "00000101000"
24: {0x0017, 11}, // "00000010111"
25: {0x0018, 11}, // "00000011000"
26: {0x00ca, 12}, // "000011001010"
27: {0x00cb, 12}, // "000011001011"
28: {0x00cc, 12}, // "000011001100"
29: {0x00cd, 12}, // "000011001101"
30: {0x0068, 12}, // "000001101000"
31: {0x0069, 12}, // "000001101001"
32: {0x006a, 12}, // "000001101010"
33: {0x006b, 12}, // "000001101011"
34: {0x00d2, 12}, // "000011010010"
35: {0x00d3, 12}, // "000011010011"
36: {0x00d4, 12}, // "000011010100"
37: {0x00d5, 12}, // "000011010101"
38: {0x00d6, 12}, // "000011010110"
39: {0x00d7, 12}, // "000011010111"
40: {0x006c, 12}, // "000001101100"
41: {0x006d, 12}, // "000001101101"
42: {0x00da, 12}, // "000011011010"
43: {0x00db, 12}, // "000011011011"
44: {0x0054, 12}, // "000001010100"
45: {0x0055, 12}, // "000001010101"
46: {0x0056, 12}, // "000001010110"
47: {0x0057, 12}, // "000001010111"
48: {0x0064, 12}, // "000001100100"
49: {0x0065, 12}, // "000001100101"
50: {0x0052, 12}, // "000001010010"
51: {0x0053, 12}, // "000001010011"
52: {0x0024, 12}, // "000000100100"
53: {0x0037, 12}, // "000000110111"
54: {0x0038, 12}, // "000000111000"
55: {0x0027, 12}, // "000000100111"
56: {0x0028, 12}, // "000000101000"
57: {0x0058, 12}, // "000001011000"
58: {0x0059, 12}, // "000001011001"
59: {0x002b, 12}, // "000000101011"
60: {0x002c, 12}, // "000000101100"
61: {0x005a, 12}, // "000001011010"
62: {0x0066, 12}, // "000001100110"
63: {0x0067, 12}, // "000001100111"
}
// blackEncodeTable3 represents Table 3 for a black run.
var blackEncodeTable3 = [...]bitString{
0: {0x000f, 10}, // "0000001111"
1: {0x00c8, 12}, // "000011001000"
2: {0x00c9, 12}, // "000011001001"
3: {0x005b, 12}, // "000001011011"
4: {0x0033, 12}, // "000000110011"
5: {0x0034, 12}, // "000000110100"
6: {0x0035, 12}, // "000000110101"
7: {0x006c, 13}, // "0000001101100"
8: {0x006d, 13}, // "0000001101101"
9: {0x004a, 13}, // "0000001001010"
10: {0x004b, 13}, // "0000001001011"
11: {0x004c, 13}, // "0000001001100"
12: {0x004d, 13}, // "0000001001101"
13: {0x0072, 13}, // "0000001110010"
14: {0x0073, 13}, // "0000001110011"
15: {0x0074, 13}, // "0000001110100"
16: {0x0075, 13}, // "0000001110101"
17: {0x0076, 13}, // "0000001110110"
18: {0x0077, 13}, // "0000001110111"
19: {0x0052, 13}, // "0000001010010"
20: {0x0053, 13}, // "0000001010011"
21: {0x0054, 13}, // "0000001010100"
22: {0x0055, 13}, // "0000001010101"
23: {0x005a, 13}, // "0000001011010"
24: {0x005b, 13}, // "0000001011011"
25: {0x0064, 13}, // "0000001100100"
26: {0x0065, 13}, // "0000001100101"
27: {0x0008, 11}, // "00000001000"
28: {0x000c, 11}, // "00000001100"
29: {0x000d, 11}, // "00000001101"
30: {0x0012, 12}, // "000000010010"
31: {0x0013, 12}, // "000000010011"
32: {0x0014, 12}, // "000000010100"
33: {0x0015, 12}, // "000000010101"
34: {0x0016, 12}, // "000000010110"
35: {0x0017, 12}, // "000000010111"
36: {0x001c, 12}, // "000000011100"
37: {0x001d, 12}, // "000000011101"
38: {0x001e, 12}, // "000000011110"
39: {0x001f, 12}, // "000000011111"
}
// COPY PASTE table.go BEGIN
const (
modePass = iota // Pass
modeH // Horizontal
modeV0 // Vertical-0
modeVR1 // Vertical-Right-1
modeVR2 // Vertical-Right-2
modeVR3 // Vertical-Right-3
modeVL1 // Vertical-Left-1
modeVL2 // Vertical-Left-2
modeVL3 // Vertical-Left-3
modeExt // Extension
modeEOL // End-of-Line
)
// COPY PASTE table.go END

102
vendor/golang.org/x/image/ccitt/writer.go generated vendored Normal file
View File

@ -0,0 +1,102 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ccitt
import (
"encoding/binary"
"io"
)
type bitWriter struct {
w io.Writer
// order is whether to process w's bytes LSB first or MSB first.
order Order
// The high nBits bits of the bits field hold encoded bits to be written to w.
bits uint64
nBits uint32
// bytes[:bw] holds encoded bytes not yet written to w.
// Overflow protection is ensured by len(bytes) being a multiple of 8.
bw uint32
bytes [1024]uint8
}
// flushBits copies 64 bits from b.bits to b.bytes. If b.bytes is then full, it
// is written to b.w.
func (b *bitWriter) flushBits() error {
binary.BigEndian.PutUint64(b.bytes[b.bw:], b.bits)
b.bits = 0
b.nBits = 0
b.bw += 8
if b.bw < uint32(len(b.bytes)) {
return nil
}
b.bw = 0
if b.order != MSB {
reverseBitsWithinBytes(b.bytes[:])
}
_, err := b.w.Write(b.bytes[:])
return err
}
// close finalizes a bitcode stream by writing any
// pending bits to bitWriter's underlying io.Writer.
func (b *bitWriter) close() error {
// Write any encoded bits to bytes.
if b.nBits > 0 {
binary.BigEndian.PutUint64(b.bytes[b.bw:], b.bits)
b.bw += (b.nBits + 7) >> 3
}
if b.order != MSB {
reverseBitsWithinBytes(b.bytes[:b.bw])
}
// Write b.bw bytes to b.w.
_, err := b.w.Write(b.bytes[:b.bw])
return err
}
// alignToByteBoundary rounds b.nBits up to a multiple of 8.
// If all 64 bits are used, flush them to bitWriter's bytes.
func (b *bitWriter) alignToByteBoundary() error {
if b.nBits = (b.nBits + 7) &^ 7; b.nBits == 64 {
return b.flushBits()
}
return nil
}
// writeCode writes a variable length bitcode to b's underlying io.Writer.
func (b *bitWriter) writeCode(bs bitString) error {
bits := bs.bits
nBits := bs.nBits
if 64-b.nBits >= nBits {
// b.bits has sufficient room for storing nBits bits.
b.bits |= uint64(bits) << (64 - nBits - b.nBits)
b.nBits += nBits
if b.nBits == 64 {
return b.flushBits()
}
return nil
}
// Number of leading bits that fill b.bits.
i := 64 - b.nBits
// Fill b.bits then flush and write remaining bits.
b.bits |= uint64(bits) >> (nBits - i)
b.nBits = 64
if err := b.flushBits(); err != nil {
return err
}
nBits -= i
b.bits = uint64(bits) << (64 - nBits)
b.nBits = nBits
return nil
}
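A hedged, package-internal sketch of the writer in action (it uses the unexported bitWriter and bitString, so it would have to live inside package ccitt, e.g. in a test, and it assumes "bytes", "fmt" and "log" are imported): writing modeV0 ("1") followed by a white run of 3 ("1000") and closing produces the single byte 0b11000000, since close pads the final partial byte with zero bits.
	var buf bytes.Buffer
	w := &bitWriter{w: &buf, order: MSB}
	// modeEncodeTable[modeV0] is {0x0001, 1} ("1");
	// whiteEncodeTable2[3] is {0x0008, 4} ("1000").
	if err := w.writeCode(bitString{0x0001, 1}); err != nil {
		log.Fatal(err)
	}
	if err := w.writeCode(bitString{0x0008, 4}); err != nil {
		log.Fatal(err)
	}
	if err := w.close(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%08b\n", buf.Bytes()) // prints: [11000000]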

69
vendor/golang.org/x/image/tiff/buffer.go generated vendored Normal file
View File

@ -0,0 +1,69 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tiff
import "io"
// buffer buffers an io.Reader to satisfy io.ReaderAt.
type buffer struct {
r io.Reader
buf []byte
}
// fill reads data from b.r until the buffer contains at least end bytes.
func (b *buffer) fill(end int) error {
m := len(b.buf)
if end > m {
if end > cap(b.buf) {
newcap := 1024
for newcap < end {
newcap *= 2
}
newbuf := make([]byte, end, newcap)
copy(newbuf, b.buf)
b.buf = newbuf
} else {
b.buf = b.buf[:end]
}
if n, err := io.ReadFull(b.r, b.buf[m:end]); err != nil {
end = m + n
b.buf = b.buf[:end]
return err
}
}
return nil
}
func (b *buffer) ReadAt(p []byte, off int64) (int, error) {
o := int(off)
end := o + len(p)
if int64(end) != off+int64(len(p)) {
return 0, io.ErrUnexpectedEOF
}
err := b.fill(end)
return copy(p, b.buf[o:end]), err
}
// Slice returns a slice of the underlying buffer. The slice contains
// n bytes starting at offset off.
func (b *buffer) Slice(off, n int) ([]byte, error) {
end := off + n
if err := b.fill(end); err != nil {
return nil, err
}
return b.buf[off:end], nil
}
// newReaderAt converts an io.Reader into an io.ReaderAt.
func newReaderAt(r io.Reader) io.ReaderAt {
if ra, ok := r.(io.ReaderAt); ok {
return ra
}
return &buffer{
r: r,
buf: make([]byte, 0, 1024),
}
}

58
vendor/golang.org/x/image/tiff/compress.go generated vendored Normal file
View File

@ -0,0 +1,58 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tiff
import (
"bufio"
"io"
)
type byteReader interface {
io.Reader
io.ByteReader
}
// unpackBits decodes the PackBits-compressed data in src and returns the
// uncompressed data.
//
// The PackBits compression format is described in section 9 (p. 42)
// of the TIFF spec.
func unpackBits(r io.Reader) ([]byte, error) {
buf := make([]byte, 128)
dst := make([]byte, 0, 1024)
br, ok := r.(byteReader)
if !ok {
br = bufio.NewReader(r)
}
for {
b, err := br.ReadByte()
if err != nil {
if err == io.EOF {
return dst, nil
}
return nil, err
}
code := int(int8(b))
switch {
case code >= 0:
n, err := io.ReadFull(br, buf[:code+1])
if err != nil {
return nil, err
}
dst = append(dst, buf[:n]...)
case code == -128:
// No-op.
default:
if b, err = br.ReadByte(); err != nil {
return nil, err
}
for j := 0; j < 1-code; j++ {
buf[j] = b
}
dst = append(dst, buf[:1-code]...)
}
}
}
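For readers unfamiliar with PackBits, here is a standalone sketch of the same rules (deliberately simpler than unpackBits above: it works on an in-memory slice and omits the streaming and error handling), together with a worked input. The control byte is a signed int8: 0..127 copies the next n+1 bytes literally, -1..-127 repeats the next byte 1-n times, and -128 is a no-op.
func packBitsExpand(src []byte) []byte {
	var dst []byte
	for i := 0; i < len(src); {
		n := int(int8(src[i]))
		i++
		switch {
		case n >= 0: // literal run: copy the next n+1 bytes
			dst = append(dst, src[i:i+n+1]...)
			i += n + 1
		case n == -128: // no-op
		default: // replicate run: repeat the next byte 1-n times
			for j := 0; j < 1-n; j++ {
				dst = append(dst, src[i])
			}
			i++
		}
	}
	return dst
}
// packBitsExpand([]byte{0xfd, 0xaa, 0x02, 0x11, 0x22, 0x33}) returns
// [0xaa 0xaa 0xaa 0xaa 0x11 0x22 0x33]: 0xfd is -3, so 0xaa is repeated
// four times, then 0x02 copies the three literal bytes that follow.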

149
vendor/golang.org/x/image/tiff/consts.go generated vendored Normal file
View File

@ -0,0 +1,149 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tiff
// A tiff image file contains one or more images. The metadata
// of each image is contained in an Image File Directory (IFD),
// which contains entries of 12 bytes each and is described
// on page 14-16 of the specification. An IFD entry consists of
//
// - a tag, which describes the meaning of the entry,
// - the data type and length of the entry,
// - the data itself or a pointer to it if it is more than 4 bytes.
//
// The presence of a length means that each IFD is effectively an array.
const (
leHeader = "II\x2A\x00" // Header for little-endian files.
beHeader = "MM\x00\x2A" // Header for big-endian files.
ifdLen = 12 // Length of an IFD entry in bytes.
)
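As a small sketch of the 12-byte entry layout just described (not part of the vendored file, and assuming "encoding/binary" is imported), splitting one little-endian IFD entry into its fields. The real parsing below (ifdUint and parseIFD) additionally follows the pointer when the data does not fit in the final 4 bytes.
// splitIFDEntry splits p, a 12-byte IFD entry from a file that starts with
// leHeader, into tag, datatype, value count, and inline value or offset.
func splitIFDEntry(p []byte) (tag, datatype uint16, count, valueOrOffset uint32) {
	bo := binary.LittleEndian
	return bo.Uint16(p[0:2]), bo.Uint16(p[2:4]), bo.Uint32(p[4:8]), bo.Uint32(p[8:12])
}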
// Data types (p. 14-16 of the spec).
const (
dtByte = 1
dtASCII = 2
dtShort = 3
dtLong = 4
dtRational = 5
)
// The length of one instance of each data type in bytes.
var lengths = [...]uint32{0, 1, 1, 2, 4, 8}
// Tags (see p. 28-41 of the spec).
const (
tImageWidth = 256
tImageLength = 257
tBitsPerSample = 258
tCompression = 259
tPhotometricInterpretation = 262
tFillOrder = 266
tStripOffsets = 273
tSamplesPerPixel = 277
tRowsPerStrip = 278
tStripByteCounts = 279
tT4Options = 292 // CCITT Group 3 options, a set of 32 flag bits.
tT6Options = 293 // CCITT Group 4 options, a set of 32 flag bits.
tTileWidth = 322
tTileLength = 323
tTileOffsets = 324
tTileByteCounts = 325
tXResolution = 282
tYResolution = 283
tResolutionUnit = 296
tPredictor = 317
tColorMap = 320
tExtraSamples = 338
tSampleFormat = 339
)
// Compression types (defined in various places in the spec and supplements).
const (
cNone = 1
cCCITT = 2
cG3 = 3 // Group 3 Fax.
cG4 = 4 // Group 4 Fax.
cLZW = 5
cJPEGOld = 6 // Superseded by cJPEG.
cJPEG = 7
cDeflate = 8 // zlib compression.
cPackBits = 32773
cDeflateOld = 32946 // Superseded by cDeflate.
)
// Photometric interpretation values (see p. 37 of the spec).
const (
pWhiteIsZero = 0
pBlackIsZero = 1
pRGB = 2
pPaletted = 3
pTransMask = 4 // transparency mask
pCMYK = 5
pYCbCr = 6
pCIELab = 8
)
// Values for the tPredictor tag (page 64-65 of the spec).
const (
prNone = 1
prHorizontal = 2
)
// Values for the tResolutionUnit tag (page 18).
const (
resNone = 1
resPerInch = 2 // Dots per inch.
resPerCM = 3 // Dots per centimeter.
)
// imageMode represents the mode of the image.
type imageMode int
const (
mBilevel imageMode = iota
mPaletted
mGray
mGrayInvert
mRGB
mRGBA
mNRGBA
mCMYK
)
// CompressionType describes the type of compression used in Options.
type CompressionType int
// Constants for supported compression types.
const (
Uncompressed CompressionType = iota
Deflate
LZW
CCITTGroup3
CCITTGroup4
)
// specValue returns the compression type constant from the TIFF spec that
// is equivalent to c.
func (c CompressionType) specValue() uint32 {
switch c {
case LZW:
return cLZW
case Deflate:
return cDeflate
case CCITTGroup3:
return cG3
case CCITTGroup4:
return cG4
}
return cNone
}

29
vendor/golang.org/x/image/tiff/fuzz.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gofuzz
package tiff
import "bytes"
func Fuzz(data []byte) int {
cfg, err := DecodeConfig(bytes.NewReader(data))
if err != nil {
return 0
}
if cfg.Width*cfg.Height > 1e6 {
return 0
}
img, err := Decode(bytes.NewReader(data))
if err != nil {
return 0
}
var w bytes.Buffer
err = Encode(&w, img, nil)
if err != nil {
panic(err)
}
return 1
}

272
vendor/golang.org/x/image/tiff/lzw/reader.go generated vendored Normal file
View File

@ -0,0 +1,272 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lzw implements the Lempel-Ziv-Welch compressed data format,
// described in T. A. Welch, ``A Technique for High-Performance Data
// Compression'', Computer, 17(6) (June 1984), pp 8-19.
//
// In particular, it implements LZW as used by the TIFF file format, including
// an "off by one" algorithmic difference when compared to standard LZW.
package lzw // import "golang.org/x/image/tiff/lzw"
/*
This file was branched from src/pkg/compress/lzw/reader.go in the
standard library. Differences from the original are marked with "NOTE".
The tif_lzw.c file in the libtiff C library has this comment:
----
The 5.0 spec describes a different algorithm than Aldus
implements. Specifically, Aldus does code length transitions
one code earlier than should be done (for real LZW).
Earlier versions of this library implemented the correct
LZW algorithm, but emitted codes in a bit order opposite
to the TIFF spec. Thus, to maintain compatibility w/ Aldus
we interpret MSB-LSB ordered codes to be images written w/
old versions of this library, but otherwise adhere to the
Aldus "off by one" algorithm.
----
The Go code doesn't read (invalid) TIFF files written by old versions of
libtiff, but the LZW algorithm in this package still differs from the one in
Go's standard library to accommodate this "off by one" in valid TIFFs.
*/
import (
"bufio"
"errors"
"fmt"
"io"
)
// Order specifies the bit ordering in an LZW data stream.
type Order int
const (
// LSB means Least Significant Bits first, as used in the GIF file format.
LSB Order = iota
// MSB means Most Significant Bits first, as used in the TIFF and PDF
// file formats.
MSB
)
const (
maxWidth = 12
decoderInvalidCode = 0xffff
flushBuffer = 1 << maxWidth
)
// decoder is the state from which the readXxx method converts a byte
// stream into a code stream.
type decoder struct {
r io.ByteReader
bits uint32
nBits uint
width uint
read func(*decoder) (uint16, error) // readLSB or readMSB
litWidth int // width in bits of literal codes
err error
// The first 1<<litWidth codes are literal codes.
// The next two codes mean clear and EOF.
// Other valid codes are in the range [lo, hi] where lo := clear + 2,
// with the upper bound incrementing on each code seen.
// overflow is the code at which hi overflows the code width. NOTE: TIFF's LZW is "off by one".
// last is the most recently seen code, or decoderInvalidCode.
clear, eof, hi, overflow, last uint16
// Each code c in [lo, hi] expands to two or more bytes. For c != hi:
// suffix[c] is the last of these bytes.
// prefix[c] is the code for all but the last byte.
// This code can either be a literal code or another code in [lo, c).
// The c == hi case is a special case.
suffix [1 << maxWidth]uint8
prefix [1 << maxWidth]uint16
// output is the temporary output buffer.
// Literal codes are accumulated from the start of the buffer.
// Non-literal codes decode to a sequence of suffixes that are first
// written right-to-left from the end of the buffer before being copied
// to the start of the buffer.
// It is flushed when it contains >= 1<<maxWidth bytes,
// so that there is always room to decode an entire code.
output [2 * 1 << maxWidth]byte
o int // write index into output
toRead []byte // bytes to return from Read
}
// readLSB returns the next code for "Least Significant Bits first" data.
func (d *decoder) readLSB() (uint16, error) {
for d.nBits < d.width {
x, err := d.r.ReadByte()
if err != nil {
return 0, err
}
d.bits |= uint32(x) << d.nBits
d.nBits += 8
}
code := uint16(d.bits & (1<<d.width - 1))
d.bits >>= d.width
d.nBits -= d.width
return code, nil
}
// readMSB returns the next code for "Most Significant Bits first" data.
func (d *decoder) readMSB() (uint16, error) {
for d.nBits < d.width {
x, err := d.r.ReadByte()
if err != nil {
return 0, err
}
d.bits |= uint32(x) << (24 - d.nBits)
d.nBits += 8
}
code := uint16(d.bits >> (32 - d.width))
d.bits <<= d.width
d.nBits -= d.width
return code, nil
}
func (d *decoder) Read(b []byte) (int, error) {
for {
if len(d.toRead) > 0 {
n := copy(b, d.toRead)
d.toRead = d.toRead[n:]
return n, nil
}
if d.err != nil {
return 0, d.err
}
d.decode()
}
}
// decode decompresses bytes from r and leaves them in d.toRead.
// read specifies how to decode bytes into codes.
// litWidth is the width in bits of literal codes.
func (d *decoder) decode() {
// Loop over the code stream, converting codes into decompressed bytes.
loop:
for {
code, err := d.read(d)
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
d.err = err
break
}
switch {
case code < d.clear:
// We have a literal code.
d.output[d.o] = uint8(code)
d.o++
if d.last != decoderInvalidCode {
// Save what the hi code expands to.
d.suffix[d.hi] = uint8(code)
d.prefix[d.hi] = d.last
}
case code == d.clear:
d.width = 1 + uint(d.litWidth)
d.hi = d.eof
d.overflow = 1 << d.width
d.last = decoderInvalidCode
continue
case code == d.eof:
d.err = io.EOF
break loop
case code <= d.hi:
c, i := code, len(d.output)-1
if code == d.hi && d.last != decoderInvalidCode {
// code == hi is a special case which expands to the last expansion
// followed by the head of the last expansion. To find the head, we walk
// the prefix chain until we find a literal code.
c = d.last
for c >= d.clear {
c = d.prefix[c]
}
d.output[i] = uint8(c)
i--
c = d.last
}
// Copy the suffix chain into output and then write that to w.
for c >= d.clear {
d.output[i] = d.suffix[c]
i--
c = d.prefix[c]
}
d.output[i] = uint8(c)
d.o += copy(d.output[d.o:], d.output[i:])
if d.last != decoderInvalidCode {
// Save what the hi code expands to.
d.suffix[d.hi] = uint8(c)
d.prefix[d.hi] = d.last
}
default:
d.err = errors.New("lzw: invalid code")
break loop
}
d.last, d.hi = code, d.hi+1
if d.hi+1 >= d.overflow { // NOTE: the "+1" is where TIFF's LZW differs from the standard algorithm.
if d.width == maxWidth {
d.last = decoderInvalidCode
} else {
d.width++
d.overflow <<= 1
}
}
if d.o >= flushBuffer {
break
}
}
// Flush pending output.
d.toRead = d.output[:d.o]
d.o = 0
}
var errClosed = errors.New("lzw: reader/writer is closed")
func (d *decoder) Close() error {
d.err = errClosed // in case any Reads come along
return nil
}
// NewReader creates a new io.ReadCloser.
// Reads from the returned io.ReadCloser read and decompress data from r.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser when
// finished reading.
// The number of bits to use for literal codes, litWidth, must be in the
// range [2,8] and is typically 8. It must equal the litWidth
// used during compression.
func NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser {
d := new(decoder)
switch order {
case LSB:
d.read = (*decoder).readLSB
case MSB:
d.read = (*decoder).readMSB
default:
d.err = errors.New("lzw: unknown order")
return d
}
if litWidth < 2 || 8 < litWidth {
d.err = fmt.Errorf("lzw: litWidth %d out of range", litWidth)
return d
}
if br, ok := r.(io.ByteReader); ok {
d.r = br
} else {
d.r = bufio.NewReader(r)
}
d.litWidth = litWidth
d.width = 1 + uint(litWidth)
d.clear = uint16(1) << uint(litWidth)
d.eof, d.hi = d.clear+1, d.clear+1
d.overflow = uint16(1) << d.width
d.last = decoderInvalidCode
return d
}
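A hedged usage sketch of this reader, with the parameters that TIFF-flavoured LZW streams normally use (MSB bit order, 8-bit literal codes); the compressed slice is hypothetical, and "bytes", "io" and "log" are assumed to be imported alongside golang.org/x/image/tiff/lzw.
	rc := lzw.NewReader(bytes.NewReader(compressed), lzw.MSB, 8)
	defer rc.Close()
	decompressed, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	_ = decompressed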

706
vendor/golang.org/x/image/tiff/reader.go generated vendored Normal file
View File

@ -0,0 +1,706 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tiff implements a TIFF image decoder and encoder.
//
// The TIFF specification is at http://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf
package tiff // import "golang.org/x/image/tiff"
import (
"compress/zlib"
"encoding/binary"
"fmt"
"image"
"image/color"
"io"
"io/ioutil"
"math"
"golang.org/x/image/ccitt"
"golang.org/x/image/tiff/lzw"
)
// A FormatError reports that the input is not a valid TIFF image.
type FormatError string
func (e FormatError) Error() string {
return "tiff: invalid format: " + string(e)
}
// An UnsupportedError reports that the input uses a valid but
// unimplemented feature.
type UnsupportedError string
func (e UnsupportedError) Error() string {
return "tiff: unsupported feature: " + string(e)
}
var errNoPixels = FormatError("not enough pixel data")
type decoder struct {
r io.ReaderAt
byteOrder binary.ByteOrder
config image.Config
mode imageMode
bpp uint
features map[int][]uint
palette []color.Color
buf []byte
off int // Current offset in buf.
v uint32 // Buffer value for reading with arbitrary bit depths.
nbits uint // Remaining number of bits in v.
}
// firstVal returns the first uint of the features entry with the given tag,
// or 0 if the tag does not exist.
func (d *decoder) firstVal(tag int) uint {
f := d.features[tag]
if len(f) == 0 {
return 0
}
return f[0]
}
// ifdUint decodes the IFD entry in p, which must be of the Byte, Short
// or Long type, and returns the decoded uint values.
func (d *decoder) ifdUint(p []byte) (u []uint, err error) {
var raw []byte
if len(p) < ifdLen {
return nil, FormatError("bad IFD entry")
}
datatype := d.byteOrder.Uint16(p[2:4])
if dt := int(datatype); dt <= 0 || dt >= len(lengths) {
return nil, UnsupportedError("IFD entry datatype")
}
count := d.byteOrder.Uint32(p[4:8])
if count > math.MaxInt32/lengths[datatype] {
return nil, FormatError("IFD data too large")
}
if datalen := lengths[datatype] * count; datalen > 4 {
// The IFD contains a pointer to the real value.
raw = make([]byte, datalen)
_, err = d.r.ReadAt(raw, int64(d.byteOrder.Uint32(p[8:12])))
} else {
raw = p[8 : 8+datalen]
}
if err != nil {
return nil, err
}
u = make([]uint, count)
switch datatype {
case dtByte:
for i := uint32(0); i < count; i++ {
u[i] = uint(raw[i])
}
case dtShort:
for i := uint32(0); i < count; i++ {
u[i] = uint(d.byteOrder.Uint16(raw[2*i : 2*(i+1)]))
}
case dtLong:
for i := uint32(0); i < count; i++ {
u[i] = uint(d.byteOrder.Uint32(raw[4*i : 4*(i+1)]))
}
default:
return nil, UnsupportedError("data type")
}
return u, nil
}
// parseIFD decides whether the IFD entry in p is "interesting" and
// stows away the data in the decoder. It returns the tag number of the
// entry and an error, if any.
func (d *decoder) parseIFD(p []byte) (int, error) {
tag := d.byteOrder.Uint16(p[0:2])
switch tag {
case tBitsPerSample,
tExtraSamples,
tPhotometricInterpretation,
tCompression,
tPredictor,
tStripOffsets,
tStripByteCounts,
tRowsPerStrip,
tTileWidth,
tTileLength,
tTileOffsets,
tTileByteCounts,
tImageLength,
tImageWidth,
tFillOrder,
tT4Options,
tT6Options:
val, err := d.ifdUint(p)
if err != nil {
return 0, err
}
d.features[int(tag)] = val
case tColorMap:
val, err := d.ifdUint(p)
if err != nil {
return 0, err
}
numcolors := len(val) / 3
if len(val)%3 != 0 || numcolors <= 0 || numcolors > 256 {
return 0, FormatError("bad ColorMap length")
}
d.palette = make([]color.Color, numcolors)
for i := 0; i < numcolors; i++ {
d.palette[i] = color.RGBA64{
uint16(val[i]),
uint16(val[i+numcolors]),
uint16(val[i+2*numcolors]),
0xffff,
}
}
case tSampleFormat:
// Page 27 of the spec: If the SampleFormat is present and
// the value is not 1 [= unsigned integer data], a Baseline
// TIFF reader that cannot handle the SampleFormat value
// must terminate the import process gracefully.
val, err := d.ifdUint(p)
if err != nil {
return 0, err
}
for _, v := range val {
if v != 1 {
return 0, UnsupportedError("sample format")
}
}
}
return int(tag), nil
}
// readBits reads n bits from the internal buffer starting at the current offset.
func (d *decoder) readBits(n uint) (v uint32, ok bool) {
for d.nbits < n {
d.v <<= 8
if d.off >= len(d.buf) {
return 0, false
}
d.v |= uint32(d.buf[d.off])
d.off++
d.nbits += 8
}
d.nbits -= n
rv := d.v >> d.nbits
d.v &^= rv << d.nbits
return rv, true
}
// flushBits discards the unread bits in the buffer used by readBits.
// It is used at the end of a line.
func (d *decoder) flushBits() {
d.v = 0
d.nbits = 0
}
// minInt returns the smaller of x or y.
func minInt(a, b int) int {
if a <= b {
return a
}
return b
}
// decode decodes the raw data of an image.
// It reads from d.buf and writes the strip or tile into dst.
func (d *decoder) decode(dst image.Image, xmin, ymin, xmax, ymax int) error {
d.off = 0
// Apply horizontal predictor if necessary.
// In this case, p contains the color difference to the preceding pixel.
// See page 64-65 of the spec.
if d.firstVal(tPredictor) == prHorizontal {
switch d.bpp {
case 16:
var off int
n := 2 * len(d.features[tBitsPerSample]) // bytes per sample times samples per pixel
for y := ymin; y < ymax; y++ {
off += n
for x := 0; x < (xmax-xmin-1)*n; x += 2 {
if off+2 > len(d.buf) {
return errNoPixels
}
v0 := d.byteOrder.Uint16(d.buf[off-n : off-n+2])
v1 := d.byteOrder.Uint16(d.buf[off : off+2])
d.byteOrder.PutUint16(d.buf[off:off+2], v1+v0)
off += 2
}
}
case 8:
var off int
n := 1 * len(d.features[tBitsPerSample]) // bytes per sample times samples per pixel
for y := ymin; y < ymax; y++ {
off += n
for x := 0; x < (xmax-xmin-1)*n; x++ {
if off >= len(d.buf) {
return errNoPixels
}
d.buf[off] += d.buf[off-n]
off++
}
}
case 1:
return UnsupportedError("horizontal predictor with 1 BitsPerSample")
}
}
rMaxX := minInt(xmax, dst.Bounds().Max.X)
rMaxY := minInt(ymax, dst.Bounds().Max.Y)
switch d.mode {
case mGray, mGrayInvert:
if d.bpp == 16 {
img := dst.(*image.Gray16)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
if d.off+2 > len(d.buf) {
return errNoPixels
}
v := d.byteOrder.Uint16(d.buf[d.off : d.off+2])
d.off += 2
if d.mode == mGrayInvert {
v = 0xffff - v
}
img.SetGray16(x, y, color.Gray16{v})
}
if rMaxX == img.Bounds().Max.X {
d.off += 2 * (xmax - img.Bounds().Max.X)
}
}
} else {
img := dst.(*image.Gray)
max := uint32((1 << d.bpp) - 1)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
v, ok := d.readBits(d.bpp)
if !ok {
return errNoPixels
}
v = v * 0xff / max
if d.mode == mGrayInvert {
v = 0xff - v
}
img.SetGray(x, y, color.Gray{uint8(v)})
}
d.flushBits()
}
}
case mPaletted:
img := dst.(*image.Paletted)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
v, ok := d.readBits(d.bpp)
if !ok {
return errNoPixels
}
img.SetColorIndex(x, y, uint8(v))
}
d.flushBits()
}
case mRGB:
if d.bpp == 16 {
img := dst.(*image.RGBA64)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
if d.off+6 > len(d.buf) {
return errNoPixels
}
r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2])
g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4])
b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6])
d.off += 6
img.SetRGBA64(x, y, color.RGBA64{r, g, b, 0xffff})
}
}
} else {
img := dst.(*image.RGBA)
for y := ymin; y < rMaxY; y++ {
min := img.PixOffset(xmin, y)
max := img.PixOffset(rMaxX, y)
off := (y - ymin) * (xmax - xmin) * 3
for i := min; i < max; i += 4 {
if off+3 > len(d.buf) {
return errNoPixels
}
img.Pix[i+0] = d.buf[off+0]
img.Pix[i+1] = d.buf[off+1]
img.Pix[i+2] = d.buf[off+2]
img.Pix[i+3] = 0xff
off += 3
}
}
}
case mNRGBA:
if d.bpp == 16 {
img := dst.(*image.NRGBA64)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
if d.off+8 > len(d.buf) {
return errNoPixels
}
r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2])
g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4])
b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6])
a := d.byteOrder.Uint16(d.buf[d.off+6 : d.off+8])
d.off += 8
img.SetNRGBA64(x, y, color.NRGBA64{r, g, b, a})
}
}
} else {
img := dst.(*image.NRGBA)
for y := ymin; y < rMaxY; y++ {
min := img.PixOffset(xmin, y)
max := img.PixOffset(rMaxX, y)
i0, i1 := (y-ymin)*(xmax-xmin)*4, (y-ymin+1)*(xmax-xmin)*4
if i1 > len(d.buf) {
return errNoPixels
}
copy(img.Pix[min:max], d.buf[i0:i1])
}
}
case mRGBA:
if d.bpp == 16 {
img := dst.(*image.RGBA64)
for y := ymin; y < rMaxY; y++ {
for x := xmin; x < rMaxX; x++ {
if d.off+8 > len(d.buf) {
return errNoPixels
}
r := d.byteOrder.Uint16(d.buf[d.off+0 : d.off+2])
g := d.byteOrder.Uint16(d.buf[d.off+2 : d.off+4])
b := d.byteOrder.Uint16(d.buf[d.off+4 : d.off+6])
a := d.byteOrder.Uint16(d.buf[d.off+6 : d.off+8])
d.off += 8
img.SetRGBA64(x, y, color.RGBA64{r, g, b, a})
}
}
} else {
img := dst.(*image.RGBA)
for y := ymin; y < rMaxY; y++ {
min := img.PixOffset(xmin, y)
max := img.PixOffset(rMaxX, y)
i0, i1 := (y-ymin)*(xmax-xmin)*4, (y-ymin+1)*(xmax-xmin)*4
if i1 > len(d.buf) {
return errNoPixels
}
copy(img.Pix[min:max], d.buf[i0:i1])
}
}
}
return nil
}
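Not part of the vendored file: a tiny self-contained sketch of the undo that the prHorizontal branch near the top of decode performs, reduced to a single 8-bit sample per pixel. Each stored byte is the difference from the pixel to its left, so decoding is a running sum across the row.
func undoHorizontalPredictor(row []byte) {
	for i := 1; i < len(row); i++ {
		row[i] += row[i-1]
	}
}
// undoHorizontalPredictor on {10, 5, 3} leaves the row as {10, 15, 18}.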
func newDecoder(r io.Reader) (*decoder, error) {
d := &decoder{
r: newReaderAt(r),
features: make(map[int][]uint),
}
p := make([]byte, 8)
if _, err := d.r.ReadAt(p, 0); err != nil {
return nil, err
}
switch string(p[0:4]) {
case leHeader:
d.byteOrder = binary.LittleEndian
case beHeader:
d.byteOrder = binary.BigEndian
default:
return nil, FormatError("malformed header")
}
ifdOffset := int64(d.byteOrder.Uint32(p[4:8]))
// The first two bytes contain the number of entries (12 bytes each).
if _, err := d.r.ReadAt(p[0:2], ifdOffset); err != nil {
return nil, err
}
numItems := int(d.byteOrder.Uint16(p[0:2]))
// All IFD entries are read in one chunk.
p = make([]byte, ifdLen*numItems)
if _, err := d.r.ReadAt(p, ifdOffset+2); err != nil {
return nil, err
}
prevTag := -1
for i := 0; i < len(p); i += ifdLen {
tag, err := d.parseIFD(p[i : i+ifdLen])
if err != nil {
return nil, err
}
if tag <= prevTag {
return nil, FormatError("tags are not sorted in ascending order")
}
prevTag = tag
}
d.config.Width = int(d.firstVal(tImageWidth))
d.config.Height = int(d.firstVal(tImageLength))
if _, ok := d.features[tBitsPerSample]; !ok {
// Default is 1 per specification.
d.features[tBitsPerSample] = []uint{1}
}
d.bpp = d.firstVal(tBitsPerSample)
switch d.bpp {
case 0:
return nil, FormatError("BitsPerSample must not be 0")
case 1, 8, 16:
// Nothing to do, these are accepted by this implementation.
default:
return nil, UnsupportedError(fmt.Sprintf("BitsPerSample of %v", d.bpp))
}
// Determine the image mode.
switch d.firstVal(tPhotometricInterpretation) {
case pRGB:
if d.bpp == 16 {
for _, b := range d.features[tBitsPerSample] {
if b != 16 {
return nil, FormatError("wrong number of samples for 16bit RGB")
}
}
} else {
for _, b := range d.features[tBitsPerSample] {
if b != 8 {
return nil, FormatError("wrong number of samples for 8bit RGB")
}
}
}
// RGB images normally have 3 samples per pixel.
// If there are more, ExtraSamples (p. 31-32 of the spec)
// gives their meaning (usually an alpha channel).
//
// This implementation does not support extra samples
// of an unspecified type.
switch len(d.features[tBitsPerSample]) {
case 3:
d.mode = mRGB
if d.bpp == 16 {
d.config.ColorModel = color.RGBA64Model
} else {
d.config.ColorModel = color.RGBAModel
}
case 4:
switch d.firstVal(tExtraSamples) {
case 1:
d.mode = mRGBA
if d.bpp == 16 {
d.config.ColorModel = color.RGBA64Model
} else {
d.config.ColorModel = color.RGBAModel
}
case 2:
d.mode = mNRGBA
if d.bpp == 16 {
d.config.ColorModel = color.NRGBA64Model
} else {
d.config.ColorModel = color.NRGBAModel
}
default:
return nil, FormatError("wrong number of samples for RGB")
}
default:
return nil, FormatError("wrong number of samples for RGB")
}
case pPaletted:
d.mode = mPaletted
d.config.ColorModel = color.Palette(d.palette)
case pWhiteIsZero:
d.mode = mGrayInvert
if d.bpp == 16 {
d.config.ColorModel = color.Gray16Model
} else {
d.config.ColorModel = color.GrayModel
}
case pBlackIsZero:
d.mode = mGray
if d.bpp == 16 {
d.config.ColorModel = color.Gray16Model
} else {
d.config.ColorModel = color.GrayModel
}
default:
return nil, UnsupportedError("color model")
}
return d, nil
}
// DecodeConfig returns the color model and dimensions of a TIFF image without
// decoding the entire image.
func DecodeConfig(r io.Reader) (image.Config, error) {
d, err := newDecoder(r)
if err != nil {
return image.Config{}, err
}
return d.config, nil
}
func ccittFillOrder(tiffFillOrder uint) ccitt.Order {
if tiffFillOrder == 2 {
return ccitt.LSB
}
return ccitt.MSB
}
// Decode reads a TIFF image from r and returns it as an image.Image.
// The type of Image returned depends on the contents of the TIFF.
func Decode(r io.Reader) (img image.Image, err error) {
d, err := newDecoder(r)
if err != nil {
return
}
blockPadding := false
blockWidth := d.config.Width
blockHeight := d.config.Height
blocksAcross := 1
blocksDown := 1
if d.config.Width == 0 {
blocksAcross = 0
}
if d.config.Height == 0 {
blocksDown = 0
}
var blockOffsets, blockCounts []uint
if int(d.firstVal(tTileWidth)) != 0 {
blockPadding = true
blockWidth = int(d.firstVal(tTileWidth))
blockHeight = int(d.firstVal(tTileLength))
if blockWidth != 0 {
blocksAcross = (d.config.Width + blockWidth - 1) / blockWidth
}
if blockHeight != 0 {
blocksDown = (d.config.Height + blockHeight - 1) / blockHeight
}
blockCounts = d.features[tTileByteCounts]
blockOffsets = d.features[tTileOffsets]
} else {
if int(d.firstVal(tRowsPerStrip)) != 0 {
blockHeight = int(d.firstVal(tRowsPerStrip))
}
if blockHeight != 0 {
blocksDown = (d.config.Height + blockHeight - 1) / blockHeight
}
blockOffsets = d.features[tStripOffsets]
blockCounts = d.features[tStripByteCounts]
}
// Check if we have the right number of strips/tiles, offsets and counts.
if n := blocksAcross * blocksDown; len(blockOffsets) < n || len(blockCounts) < n {
return nil, FormatError("inconsistent header")
}
imgRect := image.Rect(0, 0, d.config.Width, d.config.Height)
switch d.mode {
case mGray, mGrayInvert:
if d.bpp == 16 {
img = image.NewGray16(imgRect)
} else {
img = image.NewGray(imgRect)
}
case mPaletted:
img = image.NewPaletted(imgRect, d.palette)
case mNRGBA:
if d.bpp == 16 {
img = image.NewNRGBA64(imgRect)
} else {
img = image.NewNRGBA(imgRect)
}
case mRGB, mRGBA:
if d.bpp == 16 {
img = image.NewRGBA64(imgRect)
} else {
img = image.NewRGBA(imgRect)
}
}
for i := 0; i < blocksAcross; i++ {
blkW := blockWidth
if !blockPadding && i == blocksAcross-1 && d.config.Width%blockWidth != 0 {
blkW = d.config.Width % blockWidth
}
for j := 0; j < blocksDown; j++ {
blkH := blockHeight
if !blockPadding && j == blocksDown-1 && d.config.Height%blockHeight != 0 {
blkH = d.config.Height % blockHeight
}
offset := int64(blockOffsets[j*blocksAcross+i])
n := int64(blockCounts[j*blocksAcross+i])
switch d.firstVal(tCompression) {
// According to the spec, Compression does not have a default value,
// but some tools interpret a missing Compression value as none so we do
// the same.
case cNone, 0:
if b, ok := d.r.(*buffer); ok {
d.buf, err = b.Slice(int(offset), int(n))
} else {
d.buf = make([]byte, n)
_, err = d.r.ReadAt(d.buf, offset)
}
case cG3:
inv := d.firstVal(tPhotometricInterpretation) == pWhiteIsZero
order := ccittFillOrder(d.firstVal(tFillOrder))
r := ccitt.NewReader(io.NewSectionReader(d.r, offset, n), order, ccitt.Group3, blkW, blkH, &ccitt.Options{Invert: inv, Align: false})
d.buf, err = ioutil.ReadAll(r)
case cG4:
inv := d.firstVal(tPhotometricInterpretation) == pWhiteIsZero
order := ccittFillOrder(d.firstVal(tFillOrder))
r := ccitt.NewReader(io.NewSectionReader(d.r, offset, n), order, ccitt.Group4, blkW, blkH, &ccitt.Options{Invert: inv, Align: false})
d.buf, err = ioutil.ReadAll(r)
case cLZW:
r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8)
d.buf, err = ioutil.ReadAll(r)
r.Close()
case cDeflate, cDeflateOld:
var r io.ReadCloser
r, err = zlib.NewReader(io.NewSectionReader(d.r, offset, n))
if err != nil {
return nil, err
}
d.buf, err = ioutil.ReadAll(r)
r.Close()
case cPackBits:
d.buf, err = unpackBits(io.NewSectionReader(d.r, offset, n))
default:
err = UnsupportedError(fmt.Sprintf("compression value %d", d.firstVal(tCompression)))
}
if err != nil {
return nil, err
}
xmin := i * blockWidth
ymin := j * blockHeight
xmax := xmin + blkW
ymax := ymin + blkH
err = d.decode(img, xmin, ymin, xmax, ymax)
if err != nil {
return nil, err
}
}
}
return
}
func init() {
image.RegisterFormat("tiff", leHeader, Decode, DecodeConfig)
image.RegisterFormat("tiff", beHeader, Decode, DecodeConfig)
}
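
For orientation, a minimal usage sketch (not part of the vendored file; the file name is purely illustrative) showing how a caller might drive this package's DecodeConfig and Decode once the init above has registered the format:

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"golang.org/x/image/tiff"
)

func main() {
	// Hypothetical input file, used only for illustration.
	f, err := os.Open("scan.tiff")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Read only the header and IFD to learn dimensions and color model.
	cfg, err := tiff.DecodeConfig(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("size:", cfg.Width, "x", cfg.Height)

	// Rewind and decode the pixels; the concrete type of img (Gray,
	// Paletted, RGBA, ...) depends on the photometric interpretation
	// and bit depth, as selected in newDecoder above.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		log.Fatal(err)
	}
	img, err := tiff.Decode(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bounds:", img.Bounds())
}

Because init registers both the little-endian and big-endian headers with image.RegisterFormat, the generic image.Decode would work just as well here.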

vendor/golang.org/x/image/tiff/writer.go generated vendored Normal file

@ -0,0 +1,438 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tiff
import (
"bytes"
"compress/zlib"
"encoding/binary"
"image"
"io"
"sort"
)
// The TIFF format allows the order of the different elements to be chosen freely.
// The basic structure of a TIFF file written by this package is:
//
// 1. Header (8 bytes).
// 2. Image data.
// 3. Image File Directory (IFD).
// 4. "Pointer area" for larger entries in the IFD.
// We only write little-endian TIFF files.
var enc = binary.LittleEndian
// An ifdEntry is a single entry in an Image File Directory.
// A value of type dtRational is composed of two 32-bit values,
// thus data contains two uints (numerator and denominator) for a single number.
type ifdEntry struct {
tag int
datatype int
data []uint32
}
func (e ifdEntry) putData(p []byte) {
for _, d := range e.data {
switch e.datatype {
case dtByte, dtASCII:
p[0] = byte(d)
p = p[1:]
case dtShort:
enc.PutUint16(p, uint16(d))
p = p[2:]
case dtLong, dtRational:
enc.PutUint32(p, uint32(d))
p = p[4:]
}
}
}
type byTag []ifdEntry
func (d byTag) Len() int { return len(d) }
func (d byTag) Less(i, j int) bool { return d[i].tag < d[j].tag }
func (d byTag) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func encodeGray(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error {
if !predictor {
return writePix(w, pix, dy, dx, stride)
}
buf := make([]byte, dx)
for y := 0; y < dy; y++ {
min := y*stride + 0
max := y*stride + dx
off := 0
var v0 uint8
for i := min; i < max; i++ {
v1 := pix[i]
buf[off] = v1 - v0
v0 = v1
off++
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
func encodeGray16(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error {
buf := make([]byte, dx*2)
for y := 0; y < dy; y++ {
min := y*stride + 0
max := y*stride + dx*2
off := 0
var v0 uint16
for i := min; i < max; i += 2 {
// An image.Gray16's Pix is in big-endian order.
v1 := uint16(pix[i])<<8 | uint16(pix[i+1])
if predictor {
v0, v1 = v1, v1-v0
}
// We only write little-endian TIFF files.
buf[off+0] = byte(v1)
buf[off+1] = byte(v1 >> 8)
off += 2
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
func encodeRGBA(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error {
if !predictor {
return writePix(w, pix, dy, dx*4, stride)
}
buf := make([]byte, dx*4)
for y := 0; y < dy; y++ {
min := y*stride + 0
max := y*stride + dx*4
off := 0
var r0, g0, b0, a0 uint8
for i := min; i < max; i += 4 {
r1, g1, b1, a1 := pix[i+0], pix[i+1], pix[i+2], pix[i+3]
buf[off+0] = r1 - r0
buf[off+1] = g1 - g0
buf[off+2] = b1 - b0
buf[off+3] = a1 - a0
off += 4
r0, g0, b0, a0 = r1, g1, b1, a1
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
func encodeRGBA64(w io.Writer, pix []uint8, dx, dy, stride int, predictor bool) error {
buf := make([]byte, dx*8)
for y := 0; y < dy; y++ {
min := y*stride + 0
max := y*stride + dx*8
off := 0
var r0, g0, b0, a0 uint16
for i := min; i < max; i += 8 {
// An image.RGBA64's Pix is in big-endian order.
r1 := uint16(pix[i+0])<<8 | uint16(pix[i+1])
g1 := uint16(pix[i+2])<<8 | uint16(pix[i+3])
b1 := uint16(pix[i+4])<<8 | uint16(pix[i+5])
a1 := uint16(pix[i+6])<<8 | uint16(pix[i+7])
if predictor {
r0, r1 = r1, r1-r0
g0, g1 = g1, g1-g0
b0, b1 = b1, b1-b0
a0, a1 = a1, a1-a0
}
// We only write little-endian TIFF files.
buf[off+0] = byte(r1)
buf[off+1] = byte(r1 >> 8)
buf[off+2] = byte(g1)
buf[off+3] = byte(g1 >> 8)
buf[off+4] = byte(b1)
buf[off+5] = byte(b1 >> 8)
buf[off+6] = byte(a1)
buf[off+7] = byte(a1 >> 8)
off += 8
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
func encode(w io.Writer, m image.Image, predictor bool) error {
bounds := m.Bounds()
buf := make([]byte, 4*bounds.Dx())
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
off := 0
if predictor {
var r0, g0, b0, a0 uint8
for x := bounds.Min.X; x < bounds.Max.X; x++ {
r, g, b, a := m.At(x, y).RGBA()
r1 := uint8(r >> 8)
g1 := uint8(g >> 8)
b1 := uint8(b >> 8)
a1 := uint8(a >> 8)
buf[off+0] = r1 - r0
buf[off+1] = g1 - g0
buf[off+2] = b1 - b0
buf[off+3] = a1 - a0
off += 4
r0, g0, b0, a0 = r1, g1, b1, a1
}
} else {
for x := bounds.Min.X; x < bounds.Max.X; x++ {
r, g, b, a := m.At(x, y).RGBA()
buf[off+0] = uint8(r >> 8)
buf[off+1] = uint8(g >> 8)
buf[off+2] = uint8(b >> 8)
buf[off+3] = uint8(a >> 8)
off += 4
}
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
// writePix writes the internal byte array of an image to w. It is less general
// but much faster than encode. writePix is used when pix directly
// corresponds to one of the TIFF image types.
func writePix(w io.Writer, pix []byte, nrows, length, stride int) error {
if length == stride {
_, err := w.Write(pix[:nrows*length])
return err
}
for ; nrows > 0; nrows-- {
if _, err := w.Write(pix[:length]); err != nil {
return err
}
pix = pix[stride:]
}
return nil
}
func writeIFD(w io.Writer, ifdOffset int, d []ifdEntry) error {
var buf [ifdLen]byte
// Make space for "pointer area" containing IFD entry data
// longer than 4 bytes.
parea := make([]byte, 1024)
pstart := ifdOffset + ifdLen*len(d) + 6
var o int // Current offset in parea.
// The IFD has to be written with the tags in ascending order.
sort.Sort(byTag(d))
// Write the number of entries in this IFD.
if err := binary.Write(w, enc, uint16(len(d))); err != nil {
return err
}
for _, ent := range d {
enc.PutUint16(buf[0:2], uint16(ent.tag))
enc.PutUint16(buf[2:4], uint16(ent.datatype))
count := uint32(len(ent.data))
if ent.datatype == dtRational {
count /= 2
}
enc.PutUint32(buf[4:8], count)
datalen := int(count * lengths[ent.datatype])
if datalen <= 4 {
ent.putData(buf[8:12])
} else {
if (o + datalen) > len(parea) {
newlen := len(parea) + 1024
for (o + datalen) > newlen {
newlen += 1024
}
newarea := make([]byte, newlen)
copy(newarea, parea)
parea = newarea
}
ent.putData(parea[o : o+datalen])
enc.PutUint32(buf[8:12], uint32(pstart+o))
o += datalen
}
if _, err := w.Write(buf[:]); err != nil {
return err
}
}
// The IFD ends with the offset of the next IFD in the file,
// or zero if it is the last one (page 14).
if err := binary.Write(w, enc, uint32(0)); err != nil {
return err
}
_, err := w.Write(parea[:o])
return err
}
// Options are the encoding parameters.
type Options struct {
// Compression is the type of compression used.
Compression CompressionType
// Predictor determines whether a differencing predictor is used;
// if true, instead of each pixel's color, the color difference to the
// preceding one is saved. This improves the compression for certain
// types of images and compressors. For example, it works well for
// photos with Deflate compression.
Predictor bool
}
// Encode writes the image m to w. opt determines the options used for
// encoding, such as the compression type. If opt is nil, an uncompressed
// image is written.
func Encode(w io.Writer, m image.Image, opt *Options) error {
d := m.Bounds().Size()
compression := uint32(cNone)
predictor := false
if opt != nil {
compression = opt.Compression.specValue()
// The predictor field is only used with LZW. See page 64 of the spec.
predictor = opt.Predictor && compression == cLZW
}
_, err := io.WriteString(w, leHeader)
if err != nil {
return err
}
// Compressed data is written into a buffer first, so that we
// know the compressed size.
var buf bytes.Buffer
// dst holds the destination for the pixel data of the image --
// either w or a writer to buf.
var dst io.Writer
// imageLen is the length of the pixel data in bytes.
// The offset of the IFD is imageLen + 8 header bytes.
var imageLen int
switch compression {
case cNone:
dst = w
// Write IFD offset before outputting pixel data.
switch m.(type) {
case *image.Paletted:
imageLen = d.X * d.Y * 1
case *image.Gray:
imageLen = d.X * d.Y * 1
case *image.Gray16:
imageLen = d.X * d.Y * 2
case *image.RGBA64:
imageLen = d.X * d.Y * 8
case *image.NRGBA64:
imageLen = d.X * d.Y * 8
default:
imageLen = d.X * d.Y * 4
}
err = binary.Write(w, enc, uint32(imageLen+8))
if err != nil {
return err
}
case cDeflate:
dst = zlib.NewWriter(&buf)
}
pr := uint32(prNone)
photometricInterpretation := uint32(pRGB)
samplesPerPixel := uint32(4)
bitsPerSample := []uint32{8, 8, 8, 8}
extraSamples := uint32(0)
colorMap := []uint32{}
if predictor {
pr = prHorizontal
}
switch m := m.(type) {
case *image.Paletted:
photometricInterpretation = pPaletted
samplesPerPixel = 1
bitsPerSample = []uint32{8}
colorMap = make([]uint32, 256*3)
for i := 0; i < 256 && i < len(m.Palette); i++ {
r, g, b, _ := m.Palette[i].RGBA()
colorMap[i+0*256] = uint32(r)
colorMap[i+1*256] = uint32(g)
colorMap[i+2*256] = uint32(b)
}
err = encodeGray(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.Gray:
photometricInterpretation = pBlackIsZero
samplesPerPixel = 1
bitsPerSample = []uint32{8}
err = encodeGray(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.Gray16:
photometricInterpretation = pBlackIsZero
samplesPerPixel = 1
bitsPerSample = []uint32{16}
err = encodeGray16(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.NRGBA:
extraSamples = 2 // Unassociated alpha.
err = encodeRGBA(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.NRGBA64:
extraSamples = 2 // Unassociated alpha.
bitsPerSample = []uint32{16, 16, 16, 16}
err = encodeRGBA64(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.RGBA:
extraSamples = 1 // Associated alpha.
err = encodeRGBA(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
case *image.RGBA64:
extraSamples = 1 // Associated alpha.
bitsPerSample = []uint32{16, 16, 16, 16}
err = encodeRGBA64(dst, m.Pix, d.X, d.Y, m.Stride, predictor)
default:
extraSamples = 1 // Associated alpha.
err = encode(dst, m, predictor)
}
if err != nil {
return err
}
if compression != cNone {
if err = dst.(io.Closer).Close(); err != nil {
return err
}
imageLen = buf.Len()
if err = binary.Write(w, enc, uint32(imageLen+8)); err != nil {
return err
}
if _, err = buf.WriteTo(w); err != nil {
return err
}
}
ifd := []ifdEntry{
{tImageWidth, dtShort, []uint32{uint32(d.X)}},
{tImageLength, dtShort, []uint32{uint32(d.Y)}},
{tBitsPerSample, dtShort, bitsPerSample},
{tCompression, dtShort, []uint32{compression}},
{tPhotometricInterpretation, dtShort, []uint32{photometricInterpretation}},
{tStripOffsets, dtLong, []uint32{8}},
{tSamplesPerPixel, dtShort, []uint32{samplesPerPixel}},
{tRowsPerStrip, dtShort, []uint32{uint32(d.Y)}},
{tStripByteCounts, dtLong, []uint32{uint32(imageLen)}},
// There is currently no support for storing the image
// resolution, so give a bogus value of 72x72 dpi.
{tXResolution, dtRational, []uint32{72, 1}},
{tYResolution, dtRational, []uint32{72, 1}},
{tResolutionUnit, dtShort, []uint32{resPerInch}},
}
if pr != prNone {
ifd = append(ifd, ifdEntry{tPredictor, dtShort, []uint32{pr}})
}
if len(colorMap) != 0 {
ifd = append(ifd, ifdEntry{tColorMap, dtShort, colorMap})
}
if extraSamples > 0 {
ifd = append(ifd, ifdEntry{tExtraSamples, dtShort, []uint32{extraSamples}})
}
return writeIFD(w, imageLen+8, ifd)
}
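
A matching encode sketch, again not part of the vendored code; it assumes the Deflate constant from the package's CompressionType, which is defined in a file not included in this diff:

package main

import (
	"image"
	"image/color"
	"log"
	"os"

	"golang.org/x/image/tiff"
)

func main() {
	// Build a tiny gradient; real callers can pass any image.Image.
	img := image.NewRGBA(image.Rect(0, 0, 16, 16))
	for y := 0; y < 16; y++ {
		for x := 0; x < 16; x++ {
			img.Set(x, y, color.RGBA{uint8(x * 16), uint8(y * 16), 0, 255})
		}
	}

	// Hypothetical output path, for illustration only.
	f, err := os.Create("out.tiff")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Deflate-compressed output. Per the check at the top of Encode,
	// the Predictor option is only honored for LZW, so it is omitted.
	if err := tiff.Encode(f, img, &tiff.Options{Compression: tiff.Deflate}); err != nil {
		log.Fatal(err)
	}
}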

vendor/modules.txt vendored

@ -69,6 +69,9 @@ github.com/coreos/go-oidc/v3/oidc
# github.com/davecgh/go-spew v1.1.1
## explicit
github.com/davecgh/go-spew/spew
# github.com/disintegration/imaging v1.6.2
## explicit
github.com/disintegration/imaging
# github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b
## explicit; go 1.12
github.com/dsoprea/go-exif/v3
@ -282,7 +285,6 @@ github.com/modern-go/concurrent
github.com/modern-go/reflect2
# github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
## explicit
github.com/nfnt/resize
# github.com/oklog/ulid v1.3.1
## explicit
github.com/oklog/ulid
@ -617,6 +619,12 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
## explicit; go 1.18
golang.org/x/exp/constraints
golang.org/x/exp/slices
# golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8
## explicit; go 1.12
golang.org/x/image/bmp
golang.org/x/image/ccitt
golang.org/x/image/tiff
golang.org/x/image/tiff/lzw
# golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
## explicit; go 1.17
golang.org/x/mod/semver
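
The manifest hunks above add github.com/disintegration/imaging together with the golang.org/x/image packages it pulls in, and drop the github.com/nfnt/resize package import. As a purely hypothetical illustration (not the project's own media code), decoding with EXIF auto-orientation and resizing with that library looks roughly like this:

package main

import (
	"log"
	"os"

	"github.com/disintegration/imaging"
)

func main() {
	// Hypothetical input, for illustration only.
	in, err := os.Open("photo.jpg")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	// AutoOrientation(true) applies the EXIF Orientation tag during
	// decoding, so the pixels come out already rotated/flipped.
	src, err := imaging.Decode(in, imaging.AutoOrientation(true))
	if err != nil {
		log.Fatal(err)
	}

	// Resize to a 512px-wide thumbnail; a height of 0 preserves the
	// aspect ratio.
	thumb := imaging.Resize(src, 512, 0, imaging.Linear)

	if err := imaging.Save(thumb, "photo_thumb.jpg"); err != nil {
		log.Fatal(err)
	}
}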