[chore] update bun + extras v1.1.16 -> v1.1.17 (#2534)

parent a43ce99da9
commit 6433a50582

go.mod (16 changed lines)
@@ -51,10 +51,10 @@ require (
     github.com/technologize/otel-go-contrib v1.1.0
     github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80
     github.com/ulule/limiter/v3 v3.11.2
-    github.com/uptrace/bun v1.1.16
-    github.com/uptrace/bun/dialect/pgdialect v1.1.16
-    github.com/uptrace/bun/dialect/sqlitedialect v1.1.16
-    github.com/uptrace/bun/extra/bunotel v1.1.16
+    github.com/uptrace/bun v1.1.17
+    github.com/uptrace/bun/dialect/pgdialect v1.1.17
+    github.com/uptrace/bun/dialect/sqlitedialect v1.1.17
+    github.com/uptrace/bun/extra/bunotel v1.1.17
     github.com/wagslane/go-password-validator v0.3.0
     github.com/yuin/goldmark v1.6.0
     go.opentelemetry.io/otel v1.21.0
@@ -108,7 +108,7 @@ require (
     github.com/gin-contrib/sse v0.1.0 // indirect
     github.com/go-errors/errors v1.4.1 // indirect
     github.com/go-jose/go-jose/v3 v3.0.1 // indirect
-    github.com/go-logr/logr v1.3.0 // indirect
+    github.com/go-logr/logr v1.4.1 // indirect
     github.com/go-logr/stdr v1.2.2 // indirect
     github.com/go-playground/locales v0.14.1 // indirect
     github.com/go-playground/universal-translator v0.18.1 // indirect
@@ -136,7 +136,7 @@ require (
    github.com/klauspost/cpuid/v2 v2.2.6 // indirect
    github.com/leodido/go-urn v1.2.4 // indirect
    github.com/magiconair/properties v1.8.7 // indirect
-   github.com/mattn/go-isatty v0.0.19 // indirect
+   github.com/mattn/go-isatty v0.0.20 // indirect
    github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
    github.com/minio/md5-simd v1.1.2 // indirect
    github.com/minio/sha256-simd v1.0.1 // indirect
@@ -164,8 +164,8 @@ require (
    github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
    github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
    github.com/ugorji/go/codec v1.2.11 // indirect
-   github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.2 // indirect
-   github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
+   github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.3 // indirect
+   github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
    github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
    go.opentelemetry.io/otel/metric v1.21.0 // indirect

go.sum (32 changed lines)
|
@ -202,8 +202,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
|
|||
github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
|
||||
github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
|
@ -391,8 +391,8 @@ github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3v
|
|||
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
|
||||
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
|
||||
|
@ -550,24 +550,24 @@ github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4d
|
|||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA=
|
||||
github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI=
|
||||
github.com/uptrace/bun v1.1.16 h1:cn9cgEMFwcyYRsQLfxCRMUxyK1WaHwOVrR3TvzEFZ/A=
|
||||
github.com/uptrace/bun v1.1.16/go.mod h1:7HnsMRRvpLFUcquJxp22JO8PsWKpFQO/gNXqqsuGWg8=
|
||||
github.com/uptrace/bun/dialect/pgdialect v1.1.16 h1:eUPZ+YCJ69BA+W1X1ZmpOJSkv1oYtinr0zCXf7zCo5g=
|
||||
github.com/uptrace/bun/dialect/pgdialect v1.1.16/go.mod h1:KQjfx/r6JM0OXfbv0rFrxAbdkPD7idK8VitnjIV9fZI=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.1.16 h1:gbc9BP/e4sNOB9VBj+Si46dpOz2oktmZPidkda92GYY=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.1.16/go.mod h1:YNezpK7fIn5Wa2WGmTCZ/nEyiswcXmuT4iNWADeL1x4=
|
||||
github.com/uptrace/bun/extra/bunotel v1.1.16 h1:qkLTaTZK3FZk3b2P/stO/krS7KX9Fq5wSOj7Hlb2HG8=
|
||||
github.com/uptrace/bun/extra/bunotel v1.1.16/go.mod h1:JwEH0kdXFnzYuK8D6eXUrf9HKsYy5wmB+lqQ/+dvH4E=
|
||||
github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.2 h1:USRngIQppxeyb39XzkVHXwQesKK0+JSwnHE/1c7fgic=
|
||||
github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.2/go.mod h1:1frv9RN1rlTq0jzCq+mVuEQisubZCQ4OU6S/8CaHzGY=
|
||||
github.com/uptrace/bun v1.1.17 h1:qxBaEIo0hC/8O3O6GrMDKxqyT+mw5/s0Pn/n6xjyGIk=
|
||||
github.com/uptrace/bun v1.1.17/go.mod h1:hATAzivtTIRsSJR4B8AXR+uABqnQxr3myKDKEf5iQ9U=
|
||||
github.com/uptrace/bun/dialect/pgdialect v1.1.17 h1:NsvFVHAx1Az6ytlAD/B6ty3cVE6j9Yp82bjqd9R9hOs=
|
||||
github.com/uptrace/bun/dialect/pgdialect v1.1.17/go.mod h1:fLBDclNc7nKsZLzNjFL6BqSdgJzbj2HdnyOnLoDvAME=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.1.17 h1:i8NFU9r8YuavNFaYlNqi4ppn+MgoHtqLgpWQDrVTjm0=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.1.17/go.mod h1:YF0FO4VVnY9GHNH6rM4r3STlVEBxkOc6L88Bm5X5mzA=
|
||||
github.com/uptrace/bun/extra/bunotel v1.1.17 h1:RLEJdHH06RI9BLg06Vu1JHJ3KNHQCfwa2Fa3x+56qkk=
|
||||
github.com/uptrace/bun/extra/bunotel v1.1.17/go.mod h1:xV7AYrCFji4Sio6N9X+Cz+XJ+JuHq6TQQjuxaVbsypk=
|
||||
github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.3 h1:LNi0Qa7869/loPjz2kmMvp/jwZZnMZ9scMJKhDJ1DIo=
|
||||
github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.3/go.mod h1:jyigonKik3C5V895QNiAGpKYKEvFuqjw9qAEZks1mUg=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.14.0/go.mod h1:ol1PCaL0dX20wC0htZ7sYCsvCYmrouYra0zHzaclZhE=
|
||||
github.com/valyala/fasthttp v1.47.0 h1:y7moDoxYzMooFpT5aHgNgVOQDrS3qlkfiP9mDtGGK9c=
|
||||
github.com/valyala/fasthttp v1.47.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
|
||||
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
||||
github.com/wagslane/go-password-validator v0.3.0 h1:vfxOPzGHkz5S146HDpavl0cw1DSVP061Ry2PX0/ON6I=
|
||||
|
|
|
@@ -91,11 +91,12 @@ logr design but also left out some parts and changed others:
 | Adding a name to a logger | `WithName` | no API |
 | Modify verbosity of log entries in a call chain | `V` | no API |
 | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
 | Pass context for extracting additional values | no API | API variants like `InfoCtx` |
 
 The high-level slog API is explicitly meant to be one of many different APIs
 that can be layered on top of a shared `slog.Handler`. logr is one such
-alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
-package.
+alternative API, with [interoperability](#slog-interoperability) provided by
+some conversion functions.
 
 ### Inspiration
 
@@ -145,24 +146,24 @@ There are implementations for the following logging libraries:
 ## slog interoperability
 
 Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
-and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
-`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
 As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
-slog API. `slogr` itself leaves that to the caller.
+slog API.
 
-## Using a `logr.Sink` as backend for slog
+### Using a `logr.LogSink` as backend for slog
 
 Ideally, a logr sink implementation should support both logr and slog by
-implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
+implementing both the normal logr interface(s) and `SlogSink`. Because
 of a conflict in the parameters of the common `Enabled` method, it is [not
 possible to implement both slog.Handler and logr.Sink in the same
 type](https://github.com/golang/go/issues/59110).
 
 If both are supported, log calls can go from the high-level APIs to the backend
-without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
 convert back and forth without adding additional wrappers, with one exception:
 when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
-`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
 log calls.
 
 Such an implementation should also support values that implement specific
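For concreteness, a minimal sketch of the round trip described above (assuming go-logr/logr v1.4.x and Go 1.21+; this snippet is illustrative and not part of the README being diffed):

    package main

    import (
        "log/slog"
        "os"

        "github.com/go-logr/logr"
    )

    func main() {
        // slog.Handler -> logr.Logger
        logrLogger := logr.FromSlogHandler(slog.NewJSONHandler(os.Stderr, nil))
        logrLogger.Info("written through the logr API", "key", "value")

        // logr.Logger -> slog.Handler, wrapped back into the high-level slog API
        slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
        slogLogger.Info("written through the slog API", "key", "value")
    }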
@@ -187,13 +188,13 @@ Not supporting slog has several drawbacks:
 These drawbacks are severe enough that applications using a mixture of slog and
 logr should switch to a different backend.
 
-## Using a `slog.Handler` as backend for logr
+### Using a `slog.Handler` as backend for logr
 
 Using a plain `slog.Handler` without support for logr works better than the
 other direction:
 - All logr verbosity levels can be mapped 1:1 to their corresponding slog level
   by negating them.
-- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
+- Stack unwinding is done by the `SlogSink` and the resulting program
   counter is passed to the `slog.Handler`.
 - Names added via `Logger.WithName` are gathered and recorded in an additional
   attribute with `logger` as key and the names separated by slash as value.
@@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
 with logr implementations without slog support is not important, then
 `slog.Valuer` is sufficient.
 
-## Context support for slog
+### Context support for slog
 
 Storing a logger in a `context.Context` is not supported by
-slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
-to fill this gap:
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for `logr.Logger` value.
 
-    func HandlerFromContext(ctx context.Context) slog.Handler {
-        logger, err := logr.FromContext(ctx)
-        if err == nil {
-            return slogr.NewSlogHandler(logger)
-        }
-        return slog.Default().Handler()
-    }
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
 
-    func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
-        return logr.NewContext(ctx, slogr.NewLogr(handler))
-    }
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
 
-The downside is that storing and retrieving a `slog.Handler` needs more
-allocations compared to using a `logr.Logger`. Therefore the recommendation is
-to use the `logr.Logger` API in code which uses contextual logging.
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+
+With the slog API, it is possible, but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.
 
 ## FAQ
 
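A hedged usage sketch of the context helpers described above, again assuming logr v1.4.x; the choice of text handler is arbitrary:

    package main

    import (
        "context"
        "log/slog"
        "os"

        "github.com/go-logr/logr"
    )

    func main() {
        slogger := slog.New(slog.NewTextHandler(os.Stderr, nil))
        ctx := logr.NewContextWithSlogLogger(context.Background(), slogger)

        // Retrieval converts transparently in either direction.
        if logrLogger, err := logr.FromContext(ctx); err == nil {
            logrLogger.Info("stored as *slog.Logger, used as logr.Logger")
        }
        if s := logr.FromContextAsSlogLogger(ctx); s != nil {
            s.Info("and still available as *slog.Logger")
        }
    }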
@ -0,0 +1,33 @@
|
|||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
|
||||
// the value is always a Logger value. With Go >= 1.21, the value can be a
|
||||
// Logger value or a slog.Logger pointer.
|
||||
type contextKey struct{}
|
||||
|
||||
// notFoundError exists to carry an IsNotFound method.
|
||||
type notFoundError struct{}
|
||||
|
||||
func (notFoundError) Error() string {
|
||||
return "no logr.Logger was present"
|
||||
}
|
||||
|
||||
func (notFoundError) IsNotFound() bool {
|
||||
return true
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
//go:build !go1.21
|
||||
// +build !go1.21
|
||||
|
||||
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||
func FromContext(ctx context.Context) (Logger, error) {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return Logger{}, notFoundError{}
|
||||
}
|
||||
|
||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||
// returns a Logger that discards all log messages.
|
||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v
|
||||
}
|
||||
|
||||
return Discard()
|
||||
}
|
||||
|
||||
// NewContext returns a new Context, derived from ctx, which carries the
|
||||
// provided Logger.
|
||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
|
@ -0,0 +1,83 @@
|
|||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||
func FromContext(ctx context.Context) (Logger, error) {
|
||||
v := ctx.Value(contextKey{})
|
||||
if v == nil {
|
||||
return Logger{}, notFoundError{}
|
||||
}
|
||||
|
||||
switch v := v.(type) {
|
||||
case Logger:
|
||||
return v, nil
|
||||
case *slog.Logger:
|
||||
return FromSlogHandler(v.Handler()), nil
|
||||
default:
|
||||
// Not reached.
|
||||
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
|
||||
}
|
||||
}
|
||||
|
||||
// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
|
||||
func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
|
||||
v := ctx.Value(contextKey{})
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch v := v.(type) {
|
||||
case Logger:
|
||||
return slog.New(ToSlogHandler(v))
|
||||
case *slog.Logger:
|
||||
return v
|
||||
default:
|
||||
// Not reached.
|
||||
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
|
||||
}
|
||||
}
|
||||
|
||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||
// returns a Logger that discards all log messages.
|
||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||
if logger, err := FromContext(ctx); err == nil {
|
||||
return logger
|
||||
}
|
||||
return Discard()
|
||||
}
|
||||
|
||||
// NewContext returns a new Context, derived from ctx, which carries the
|
||||
// provided Logger.
|
||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
||||
|
||||
// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
|
||||
// provided slog.Logger.
|
||||
func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
|
@@ -100,6 +100,11 @@ type Options struct {
     // details, see docs for Go's time.Layout.
     TimestampFormat string
 
+    // LogInfoLevel tells funcr what key to use to log the info level.
+    // If not specified, the info level will be logged as "level".
+    // If this is set to "", the info level will not be logged at all.
+    LogInfoLevel *string
+
     // Verbosity tells funcr which V logs to produce. Higher values enable
     // more logs. Info logs at or below this level will be written, while logs
     // above this level will be discarded.
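Purely for illustration (not part of the diff), a sketch of how the new LogInfoLevel option could be used; the print callback and the "vlevel" key are arbitrary choices:

    package main

    import (
        "fmt"

        "github.com/go-logr/logr/funcr"
    )

    func main() {
        key := "vlevel" // emit the numeric info level under "vlevel" instead of the default "level"
        log := funcr.New(func(prefix, args string) {
            fmt.Println(prefix, args)
        }, funcr.Options{LogInfoLevel: &key, Verbosity: 1})

        log.V(1).Info("cache warmed", "entries", 42)
    }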
@@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
     if opts.MaxLogDepth == 0 {
         opts.MaxLogDepth = defaultMaxLogDepth
     }
+    if opts.LogInfoLevel == nil {
+        opts.LogInfoLevel = new(string)
+        *opts.LogInfoLevel = "level"
+    }
     f := Formatter{
         outputFormat: outfmt,
         prefix: "",
@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
|||
// implementation. It should be constructed with NewFormatter. Some of
|
||||
// its methods directly implement logr.LogSink.
|
||||
type Formatter struct {
|
||||
outputFormat outputFormat
|
||||
prefix string
|
||||
values []any
|
||||
valuesStr string
|
||||
depth int
|
||||
opts *Options
|
||||
outputFormat outputFormat
|
||||
prefix string
|
||||
values []any
|
||||
valuesStr string
|
||||
parentValuesStr string
|
||||
depth int
|
||||
opts *Options
|
||||
group string // for slog groups
|
||||
groupDepth int
|
||||
}
|
||||
|
||||
// outputFormat indicates which outputFormat to use.
|
||||
|
@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string {
|
|||
// Empirically bytes.Buffer is faster than strings.Builder for this.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('{')
|
||||
buf.WriteByte('{') // for the whole line
|
||||
}
|
||||
|
||||
vals := builtins
|
||||
if hook := f.opts.RenderBuiltinsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
|
||||
continuing := len(builtins) > 0
|
||||
if len(f.valuesStr) > 0 {
|
||||
|
||||
if f.parentValuesStr != "" {
|
||||
if continuing {
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(',')
|
||||
} else {
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.parentValuesStr)
|
||||
continuing = true
|
||||
buf.WriteString(f.valuesStr)
|
||||
}
|
||||
|
||||
groupDepth := f.groupDepth
|
||||
if f.group != "" {
|
||||
if f.valuesStr != "" || len(args) != 0 {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('{') // for the group
|
||||
continuing = false
|
||||
} else {
|
||||
// The group was empty
|
||||
groupDepth--
|
||||
}
|
||||
}
|
||||
|
||||
if f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.valuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
vals = args
|
||||
if hook := f.opts.RenderArgsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, continuing, true) // escape user-provided keys
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('}')
|
||||
|
||||
for i := 0; i < groupDepth; i++ {
|
||||
buf.WriteByte('}') // for the groups
|
||||
}
|
||||
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('}') // for the whole line
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
|
@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
|||
if len(kvList)%2 != 0 {
|
||||
kvList = append(kvList, noValue)
|
||||
}
|
||||
copied := false
|
||||
for i := 0; i < len(kvList); i += 2 {
|
||||
k, ok := kvList[i].(string)
|
||||
if !ok {
|
||||
if !copied {
|
||||
newList := make([]any, len(kvList))
|
||||
copy(newList, kvList)
|
||||
kvList = newList
|
||||
copied = true
|
||||
}
|
||||
k = f.nonStringKey(kvList[i])
|
||||
kvList[i] = k
|
||||
}
|
||||
|
@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
|||
|
||||
if i > 0 || continuing {
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(',')
|
||||
buf.WriteByte(f.comma())
|
||||
} else {
|
||||
// In theory the format could be something we don't understand. In
|
||||
// practice, we control it, so it won't be.
|
||||
|
@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
|||
}
|
||||
}
|
||||
|
||||
if escapeKeys {
|
||||
buf.WriteString(prettyString(k))
|
||||
} else {
|
||||
// this is faster
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(k)
|
||||
buf.WriteByte('"')
|
||||
}
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(':')
|
||||
} else {
|
||||
buf.WriteByte('=')
|
||||
}
|
||||
buf.WriteString(f.quoted(k, escapeKeys))
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.pretty(v))
|
||||
}
|
||||
return kvList
|
||||
}
|
||||
|
||||
func (f Formatter) quoted(str string, escape bool) string {
|
||||
if escape {
|
||||
return prettyString(str)
|
||||
}
|
||||
// this is faster
|
||||
return `"` + str + `"`
|
||||
}
|
||||
|
||||
func (f Formatter) comma() byte {
|
||||
if f.outputFormat == outputJSON {
|
||||
return ','
|
||||
}
|
||||
return ' '
|
||||
}
|
||||
|
||||
func (f Formatter) colon() byte {
|
||||
if f.outputFormat == outputJSON {
|
||||
return ':'
|
||||
}
|
||||
return '='
|
||||
}
|
||||
|
||||
func (f Formatter) pretty(value any) string {
|
||||
return f.prettyWithFlags(value, 0, 0)
|
||||
}
|
||||
|
@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
}
|
||||
for i := 0; i < len(v); i += 2 {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
k, _ := v[i].(string) // sanitize() above means no need to check success
|
||||
// arbitrary keys might need escaping
|
||||
buf.WriteString(prettyString(k))
|
||||
buf.WriteByte(':')
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
|
@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
continue
|
||||
}
|
||||
if printComma {
|
||||
buf.WriteByte(',')
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
printComma = true // if we got here, we are rendering a field
|
||||
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
|
||||
|
@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
name = fld.Name
|
||||
}
|
||||
// field names can't contain characters which need escaping
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(name)
|
||||
buf.WriteByte('"')
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(f.quoted(name, false))
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
|
@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
buf.WriteByte('[')
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
e := v.Index(i)
|
||||
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
|
||||
|
@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
i := 0
|
||||
for it.Next() {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
// If a map key supports TextMarshaler, use it.
|
||||
keystr := ""
|
||||
|
@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
}
|
||||
}
|
||||
buf.WriteString(keystr)
|
||||
buf.WriteByte(':')
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
|
||||
i++
|
||||
}
|
||||
|
@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any {
|
|||
return kvList
|
||||
}
|
||||
|
||||
// startGroup opens a new group scope (basically a sub-struct), which locks all
|
||||
// the current saved values and starts them anew. This is needed to satisfy
|
||||
// slog.
|
||||
func (f *Formatter) startGroup(group string) {
|
||||
// Unnamed groups are just inlined.
|
||||
if group == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Any saved values can no longer be changed.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
continuing := false
|
||||
|
||||
if f.parentValuesStr != "" {
|
||||
buf.WriteString(f.parentValuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
if f.group != "" && f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('{') // for the group
|
||||
continuing = false
|
||||
}
|
||||
|
||||
if f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.valuesStr)
|
||||
}
|
||||
|
||||
// NOTE: We don't close the scope here - that's done later, when a log line
|
||||
// is actually rendered (because we have N scopes to close).
|
||||
|
||||
f.parentValuesStr = buf.String()
|
||||
|
||||
// Start collecting new values.
|
||||
f.group = group
|
||||
f.groupDepth++
|
||||
f.valuesStr = ""
|
||||
f.values = nil
|
||||
}
|
||||
|
||||
// Init configures this Formatter from runtime info, such as the call depth
|
||||
// imposed by logr itself.
|
||||
// Note that this receiver is a pointer, so depth can be saved.
|
||||
|
@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args
|
|||
if policy := f.opts.LogCaller; policy == All || policy == Info {
|
||||
args = append(args, "caller", f.caller())
|
||||
}
|
||||
args = append(args, "level", level, "msg", msg)
|
||||
if key := *f.opts.LogInfoLevel; key != "" {
|
||||
args = append(args, key, level)
|
||||
}
|
||||
args = append(args, "msg", msg)
|
||||
return prefix, f.render(args, kvList)
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,105 @@
|
|||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package funcr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
var _ logr.SlogSink = &fnlogger{}
|
||||
|
||||
const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
|
||||
|
||||
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
|
||||
kvList := make([]any, 0, 2*record.NumAttrs())
|
||||
record.Attrs(func(attr slog.Attr) bool {
|
||||
kvList = attrToKVs(attr, kvList)
|
||||
return true
|
||||
})
|
||||
|
||||
if record.Level >= slog.LevelError {
|
||||
l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
|
||||
} else {
|
||||
level := l.levelFromSlog(record.Level)
|
||||
l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
|
||||
kvList := make([]any, 0, 2*len(attrs))
|
||||
for _, attr := range attrs {
|
||||
kvList = attrToKVs(attr, kvList)
|
||||
}
|
||||
l.AddValues(kvList)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithGroup(name string) logr.SlogSink {
|
||||
l.startGroup(name)
|
||||
return &l
|
||||
}
|
||||
|
||||
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
|
||||
// and other details of slog.
|
||||
func attrToKVs(attr slog.Attr, kvList []any) []any {
|
||||
attrVal := attr.Value.Resolve()
|
||||
if attrVal.Kind() == slog.KindGroup {
|
||||
groupVal := attrVal.Group()
|
||||
grpKVs := make([]any, 0, 2*len(groupVal))
|
||||
for _, attr := range groupVal {
|
||||
grpKVs = attrToKVs(attr, grpKVs)
|
||||
}
|
||||
if attr.Key == "" {
|
||||
// slog says we have to inline these
|
||||
kvList = append(kvList, grpKVs...)
|
||||
} else {
|
||||
kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
|
||||
}
|
||||
} else if attr.Key != "" {
|
||||
kvList = append(kvList, attr.Key, attrVal.Any())
|
||||
}
|
||||
|
||||
return kvList
|
||||
}
|
||||
|
||||
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
|
||||
// It ensures that the result is >= 0. This is necessary because the result is
|
||||
// passed to a LogSink and that API did not historically document whether
|
||||
// levels could be negative or what that meant.
|
||||
//
|
||||
// Some example usage:
|
||||
//
|
||||
// logrV0 := getMyLogger()
|
||||
// logrV2 := logrV0.V(2)
|
||||
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
|
||||
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
|
||||
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
|
||||
// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
|
||||
func (l fnlogger) levelFromSlog(level slog.Level) int {
|
||||
result := -level
|
||||
if result < 0 {
|
||||
result = 0 // because LogSink doesn't expect negative V levels
|
||||
}
|
||||
return int(result)
|
||||
}
|
|
@ -207,10 +207,6 @@ limitations under the License.
|
|||
// those.
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// New returns a new Logger instance. This is primarily used by libraries
|
||||
// implementing LogSink, rather than end users. Passing a nil sink will create
|
||||
// a Logger which discards all log lines.
|
||||
|
@ -410,45 +406,6 @@ func (l Logger) IsZero() bool {
|
|||
return l.sink == nil
|
||||
}
|
||||
|
||||
// contextKey is how we find Loggers in a context.Context.
|
||||
type contextKey struct{}
|
||||
|
||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||
func FromContext(ctx context.Context) (Logger, error) {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return Logger{}, notFoundError{}
|
||||
}
|
||||
|
||||
// notFoundError exists to carry an IsNotFound method.
|
||||
type notFoundError struct{}
|
||||
|
||||
func (notFoundError) Error() string {
|
||||
return "no logr.Logger was present"
|
||||
}
|
||||
|
||||
func (notFoundError) IsNotFound() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||
// returns a Logger that discards all log messages.
|
||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v
|
||||
}
|
||||
|
||||
return Discard()
|
||||
}
|
||||
|
||||
// NewContext returns a new Context, derived from ctx, which carries the
|
||||
// provided Logger.
|
||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
||||
|
||||
// RuntimeInfo holds information that the logr "core" library knows which
|
||||
// LogSinks might want to know.
|
||||
type RuntimeInfo struct {
|
||||
|
|
|
@ -0,0 +1,192 @@
|
|||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
type slogHandler struct {
|
||||
// May be nil, in which case all logs get discarded.
|
||||
sink LogSink
|
||||
// Non-nil if sink is non-nil and implements SlogSink.
|
||||
slogSink SlogSink
|
||||
|
||||
// groupPrefix collects values from WithGroup calls. It gets added as
|
||||
// prefix to value keys when handling a log record.
|
||||
groupPrefix string
|
||||
|
||||
// levelBias can be set when constructing the handler to influence the
|
||||
// slog.Level of log records. A positive levelBias reduces the
|
||||
// slog.Level value. slog has no API to influence this value after the
|
||||
// handler got created, so it can only be set indirectly through
|
||||
// Logger.V.
|
||||
levelBias slog.Level
|
||||
}
|
||||
|
||||
var _ slog.Handler = &slogHandler{}
|
||||
|
||||
// groupSeparator is used to concatenate WithGroup names and attribute keys.
|
||||
const groupSeparator = "."
|
||||
|
||||
// GetLevel is used for black box unit testing.
|
||||
func (l *slogHandler) GetLevel() slog.Level {
|
||||
return l.levelBias
|
||||
}
|
||||
|
||||
func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
|
||||
return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
|
||||
}
|
||||
|
||||
func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
|
||||
if l.slogSink != nil {
|
||||
// Only adjust verbosity level of log entries < slog.LevelError.
|
||||
if record.Level < slog.LevelError {
|
||||
record.Level -= l.levelBias
|
||||
}
|
||||
return l.slogSink.Handle(ctx, record)
|
||||
}
|
||||
|
||||
// No need to check for nil sink here because Handle will only be called
|
||||
// when Enabled returned true.
|
||||
|
||||
kvList := make([]any, 0, 2*record.NumAttrs())
|
||||
record.Attrs(func(attr slog.Attr) bool {
|
||||
kvList = attrToKVs(attr, l.groupPrefix, kvList)
|
||||
return true
|
||||
})
|
||||
if record.Level >= slog.LevelError {
|
||||
l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
|
||||
} else {
|
||||
level := l.levelFromSlog(record.Level)
|
||||
l.sinkWithCallDepth().Info(level, record.Message, kvList...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
|
||||
// are called by Handle, code in slog gets skipped.
|
||||
//
|
||||
// This offset currently (Go 1.21.0) works for calls through
|
||||
// slog.New(ToSlogHandler(...)). There's no guarantee that the call
|
||||
// chain won't change. Wrapping the handler will also break unwinding. It's
|
||||
// still better than not adjusting at all....
|
||||
//
|
||||
// This cannot be done when constructing the handler because FromSlogHandler needs
|
||||
// access to the original sink without this adjustment. A second copy would
|
||||
// work, but then WithAttrs would have to be called for both of them.
|
||||
func (l *slogHandler) sinkWithCallDepth() LogSink {
|
||||
if sink, ok := l.sink.(CallDepthLogSink); ok {
|
||||
return sink.WithCallDepth(2)
|
||||
}
|
||||
return l.sink
|
||||
}
|
||||
|
||||
func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||
if l.sink == nil || len(attrs) == 0 {
|
||||
return l
|
||||
}
|
||||
|
||||
clone := *l
|
||||
if l.slogSink != nil {
|
||||
clone.slogSink = l.slogSink.WithAttrs(attrs)
|
||||
clone.sink = clone.slogSink
|
||||
} else {
|
||||
kvList := make([]any, 0, 2*len(attrs))
|
||||
for _, attr := range attrs {
|
||||
kvList = attrToKVs(attr, l.groupPrefix, kvList)
|
||||
}
|
||||
clone.sink = l.sink.WithValues(kvList...)
|
||||
}
|
||||
return &clone
|
||||
}
|
||||
|
||||
func (l *slogHandler) WithGroup(name string) slog.Handler {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
if name == "" {
|
||||
// slog says to inline empty groups
|
||||
return l
|
||||
}
|
||||
clone := *l
|
||||
if l.slogSink != nil {
|
||||
clone.slogSink = l.slogSink.WithGroup(name)
|
||||
clone.sink = clone.slogSink
|
||||
} else {
|
||||
clone.groupPrefix = addPrefix(clone.groupPrefix, name)
|
||||
}
|
||||
return &clone
|
||||
}
|
||||
|
||||
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
|
||||
// and other details of slog.
|
||||
func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
|
||||
attrVal := attr.Value.Resolve()
|
||||
if attrVal.Kind() == slog.KindGroup {
|
||||
groupVal := attrVal.Group()
|
||||
grpKVs := make([]any, 0, 2*len(groupVal))
|
||||
prefix := groupPrefix
|
||||
if attr.Key != "" {
|
||||
prefix = addPrefix(groupPrefix, attr.Key)
|
||||
}
|
||||
for _, attr := range groupVal {
|
||||
grpKVs = attrToKVs(attr, prefix, grpKVs)
|
||||
}
|
||||
kvList = append(kvList, grpKVs...)
|
||||
} else if attr.Key != "" {
|
||||
kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
|
||||
}
|
||||
|
||||
return kvList
|
||||
}
|
||||
|
||||
func addPrefix(prefix, name string) string {
|
||||
if prefix == "" {
|
||||
return name
|
||||
}
|
||||
if name == "" {
|
||||
return prefix
|
||||
}
|
||||
return prefix + groupSeparator + name
|
||||
}
|
||||
|
||||
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
|
||||
// It ensures that the result is >= 0. This is necessary because the result is
|
||||
// passed to a LogSink and that API did not historically document whether
|
||||
// levels could be negative or what that meant.
|
||||
//
|
||||
// Some example usage:
|
||||
//
|
||||
// logrV0 := getMyLogger()
|
||||
// logrV2 := logrV0.V(2)
|
||||
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
|
||||
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
|
||||
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
|
||||
// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
|
||||
func (l *slogHandler) levelFromSlog(level slog.Level) int {
|
||||
result := -level
|
||||
result += l.levelBias // in case the original Logger had a V level
|
||||
if result < 0 {
|
||||
result = 0 // because LogSink doesn't expect negative V levels
|
||||
}
|
||||
return int(result)
|
||||
}
|
|
@ -0,0 +1,100 @@
|
|||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
// FromSlogHandler returns a Logger which writes to the slog.Handler.
|
||||
//
|
||||
// The logr verbosity level is mapped to slog levels such that V(0) becomes
|
||||
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
|
||||
func FromSlogHandler(handler slog.Handler) Logger {
|
||||
if handler, ok := handler.(*slogHandler); ok {
|
||||
if handler.sink == nil {
|
||||
return Discard()
|
||||
}
|
||||
return New(handler.sink).V(int(handler.levelBias))
|
||||
}
|
||||
return New(&slogSink{handler: handler})
|
||||
}
|
||||
|
||||
// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
|
||||
//
|
||||
// The returned logger writes all records with level >= slog.LevelError as
|
||||
// error log entries with LogSink.Error, regardless of the verbosity level of
|
||||
// the Logger:
|
||||
//
|
||||
// logger := <some Logger with 0 as verbosity level>
|
||||
// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
|
||||
//
|
||||
// The level of all other records gets reduced by the verbosity
|
||||
// level of the Logger and the result is negated. If it happens
|
||||
// to be negative, then it gets replaced by zero because a LogSink
|
||||
// is not expected to handled negative levels:
|
||||
//
|
||||
// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
|
||||
// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
|
||||
// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
|
||||
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
|
||||
func ToSlogHandler(logger Logger) slog.Handler {
|
||||
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
|
||||
return sink.handler
|
||||
}
|
||||
|
||||
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
|
||||
if slogSink, ok := handler.sink.(SlogSink); ok {
|
||||
handler.slogSink = slogSink
|
||||
}
|
||||
return handler
|
||||
}
|
||||
|
||||
// SlogSink is an optional interface that a LogSink can implement to support
|
||||
// logging through the slog.Logger or slog.Handler APIs better. It then should
|
||||
// also support special slog values like slog.Group. When used as a
|
||||
// slog.Handler, the advantages are:
|
||||
//
|
||||
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
|
||||
// as intended by slog
|
||||
// - proper grouping of key/value pairs via WithGroup
|
||||
// - verbosity levels > slog.LevelInfo can be recorded
|
||||
// - less overhead
|
||||
//
|
||||
// Both APIs (Logger and slog.Logger/Handler) then are supported equally
|
||||
// well. Developers can pick whatever API suits them better and/or mix
|
||||
// packages which use either API in the same binary with a common logging
|
||||
// implementation.
|
||||
//
|
||||
// This interface is necessary because the type implementing the LogSink
|
||||
// interface cannot also implement the slog.Handler interface due to the
|
||||
// different prototype of the common Enabled method.
|
||||
//
|
||||
// An implementation could support both interfaces in two different types, but then
|
||||
// additional interfaces would be needed to convert between those types in FromSlogHandler
|
||||
// and ToSlogHandler.
|
||||
type SlogSink interface {
|
||||
LogSink
|
||||
|
||||
Handle(ctx context.Context, record slog.Record) error
|
||||
WithAttrs(attrs []slog.Attr) SlogSink
|
||||
WithGroup(name string) SlogSink
|
||||
}
|
|
@ -0,0 +1,120 @@
|
|||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
_ LogSink = &slogSink{}
|
||||
_ CallDepthLogSink = &slogSink{}
|
||||
_ Underlier = &slogSink{}
|
||||
)
|
||||
|
||||
// Underlier is implemented by the LogSink returned by NewFromLogHandler.
|
||||
type Underlier interface {
|
||||
// GetUnderlying returns the Handler used by the LogSink.
|
||||
GetUnderlying() slog.Handler
|
||||
}
|
||||
|
||||
const (
|
||||
// nameKey is used to log the `WithName` values as an additional attribute.
|
||||
nameKey = "logger"
|
||||
|
||||
// errKey is used to log the error parameter of Error as an additional attribute.
|
||||
errKey = "err"
|
||||
)
|
||||
|
||||
type slogSink struct {
|
||||
callDepth int
|
||||
name string
|
||||
handler slog.Handler
|
||||
}
|
||||
|
||||
func (l *slogSink) Init(info RuntimeInfo) {
|
||||
l.callDepth = info.CallDepth
|
||||
}
|
||||
|
||||
func (l *slogSink) GetUnderlying() slog.Handler {
|
||||
return l.handler
|
||||
}
|
||||
|
||||
func (l *slogSink) WithCallDepth(depth int) LogSink {
|
||||
newLogger := *l
|
||||
newLogger.callDepth += depth
|
||||
return &newLogger
|
||||
}
|
||||
|
||||
func (l *slogSink) Enabled(level int) bool {
|
||||
return l.handler.Enabled(context.Background(), slog.Level(-level))
|
||||
}
|
||||
|
||||
func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
|
||||
l.log(nil, msg, slog.Level(-level), kvList...)
|
||||
}
|
||||
|
||||
func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
|
||||
l.log(err, msg, slog.LevelError, kvList...)
|
||||
}
|
||||
|
||||
func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
|
||||
var pcs [1]uintptr
|
||||
// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
|
||||
runtime.Callers(3+l.callDepth, pcs[:])
|
||||
|
||||
record := slog.NewRecord(time.Now(), level, msg, pcs[0])
|
||||
if l.name != "" {
|
||||
record.AddAttrs(slog.String(nameKey, l.name))
|
||||
}
|
||||
if err != nil {
|
||||
record.AddAttrs(slog.Any(errKey, err))
|
||||
}
|
||||
record.Add(kvList...)
|
||||
_ = l.handler.Handle(context.Background(), record)
|
||||
}
|
||||
|
||||
func (l slogSink) WithName(name string) LogSink {
|
||||
if l.name != "" {
|
||||
l.name += "/"
|
||||
}
|
||||
l.name += name
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l slogSink) WithValues(kvList ...interface{}) LogSink {
|
||||
l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
|
||||
return &l
|
||||
}
|
||||
|
||||
func kvListToAttrs(kvList ...interface{}) []slog.Attr {
|
||||
// We don't need the record itself, only its Add method.
|
||||
record := slog.NewRecord(time.Time{}, 0, "", 0)
|
||||
record.Add(kvList...)
|
||||
attrs := make([]slog.Attr, 0, record.NumAttrs())
|
||||
record.Attrs(func(attr slog.Attr) bool {
|
||||
attrs = append(attrs, attr)
|
||||
return true
|
||||
})
|
||||
return attrs
|
||||
}
|
|
@ -1,6 +1,7 @@
|
|||
//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine
|
||||
//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo
|
||||
// +build darwin freebsd openbsd netbsd dragonfly hurd
|
||||
// +build !appengine
|
||||
// +build !tinygo
|
||||
|
||||
package isatty
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
//go:build appengine || js || nacl || wasm
|
||||
// +build appengine js nacl wasm
|
||||
//go:build (appengine || js || nacl || tinygo || wasm) && !windows
|
||||
// +build appengine js nacl tinygo wasm
|
||||
// +build !windows
|
||||
|
||||
package isatty
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
//go:build (linux || aix || zos) && !appengine
|
||||
//go:build (linux || aix || zos) && !appengine && !tinygo
|
||||
// +build linux aix zos
|
||||
// +build !appengine
|
||||
// +build !tinygo
|
||||
|
||||
package isatty
|
||||
|
||||
|
|
|
@@ -1,3 +1,4 @@
 # Patterns for files created by this project.
 # For other files, use global gitignore.
 *.s3db
+.idea
@@ -1,3 +1,15 @@
+## [1.1.17](https://github.com/uptrace/bun/compare/v1.1.16...v1.1.17) (2024-01-11)
+
+
+### Features
+
+* add CreateTxSQLMigrations function ([#916](https://github.com/uptrace/bun/issues/916)) ([c68ec7c](https://github.com/uptrace/bun/commit/c68ec7cfc418959eb7c79028be7ac91f97d462ef))
+* add Join to UpdateQuery ([#908](https://github.com/uptrace/bun/issues/908)) ([8c4d8be](https://github.com/uptrace/bun/commit/8c4d8be3aa4e64582698b37fd21434b8960dddc0))
+* bunslog.QueryHook for Bun logging using `log/slog` ([#904](https://github.com/uptrace/bun/issues/904)) ([4953367](https://github.com/uptrace/bun/commit/495336731da0a995aa28c7bc84345c7825408e48))
+* dbfixture.New to accept IDB interface ([#900](https://github.com/uptrace/bun/issues/900)) ([2dee174](https://github.com/uptrace/bun/commit/2dee174bc4d09a45caeeede2885306e5fd10002d))
+
+
+
 ## [1.1.16](https://github.com/uptrace/bun/compare/v1.1.15...v1.1.16) (2023-09-16)
 
 
@@ -108,3 +108,7 @@ func (d *Dialect) AppendUint32(b []byte, n uint32) []byte {
 func (d *Dialect) AppendUint64(b []byte, n uint64) []byte {
     return strconv.AppendInt(b, int64(n), 10)
 }
+
+func (d *Dialect) AppendSequence(b []byte, _ *schema.Table, _ *schema.Field) []byte {
+    return append(b, " GENERATED BY DEFAULT AS IDENTITY"...)
+}
@@ -2,5 +2,5 @@ package pgdialect
 
 // Version is the current release version.
 func Version() string {
-    return "1.1.16"
+    return "1.1.17"
 }
@@ -39,6 +39,7 @@ func New() *Dialect {
         feature.InsertOnConflict |
         feature.TableNotExists |
         feature.SelectExists |
+        feature.AutoIncrement |
         feature.CompositeIn
     return d
 }
@@ -91,6 +92,25 @@ func (d *Dialect) DefaultVarcharLen() int {
     return 0
 }
 
+// AppendSequence adds AUTOINCREMENT keyword to the column definition. As per [documentation],
+// AUTOINCREMENT is only valid for INTEGER PRIMARY KEY, and this method will be a noop for other columns.
+//
+// Because this is a valid construct:
+//	CREATE TABLE ("id" INTEGER PRIMARY KEY AUTOINCREMENT);
+// and this is not:
+//	CREATE TABLE ("id" INTEGER AUTOINCREMENT, PRIMARY KEY ("id"));
+// AppendSequence adds a primary key constraint as a *side-effect*. Callers should expect it to avoid building invalid SQL.
+// SQLite also [does not support] AUTOINCREMENT column in composite primary keys.
+//
+// [documentation]: https://www.sqlite.org/autoinc.html
+// [does not support]: https://stackoverflow.com/a/6793274/14726116
+func (d *Dialect) AppendSequence(b []byte, table *schema.Table, field *schema.Field) []byte {
+    if field.IsPK && len(table.PKs) == 1 && field.CreateTableSQLType == sqltype.Integer {
+        b = append(b, " PRIMARY KEY AUTOINCREMENT"...)
+    }
+    return b
+}
+
 func fieldSQLType(field *schema.Field) string {
     switch field.DiscoveredSQLType {
     case sqltype.SmallInt, sqltype.BigInt:
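To illustrate what the new SQLite AppendSequence enables, a hedged sketch of a model it targets; the Item struct, the in-memory DSN, and the DDL quoted in the comment are assumptions, not output taken from bun:

    package main

    import (
        "context"
        "database/sql"

        "github.com/uptrace/bun"
        "github.com/uptrace/bun/dialect/sqlitedialect"
        "github.com/uptrace/bun/driver/sqliteshim"
    )

    // Item is an invented model with a single auto-incrementing integer primary key.
    type Item struct {
        ID   int64 `bun:"id,pk,autoincrement"`
        Name string
    }

    func main() {
        sqldb, err := sql.Open(sqliteshim.ShimName, "file::memory:?cache=shared")
        if err != nil {
            panic(err)
        }
        db := bun.NewDB(sqldb, sqlitedialect.New())

        // With the AutoIncrement feature enabled, the id column is rendered roughly as
        //   "id" INTEGER PRIMARY KEY AUTOINCREMENT
        if _, err := db.NewCreateTable().Model((*Item)(nil)).Exec(context.Background()); err != nil {
            panic(err)
        }
    }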
@@ -2,5 +2,5 @@ package sqlitedialect
 
 // Version is the current release version.
 func Version() string {
-    return "1.1.16"
+    return "1.1.17"
 }
@@ -158,6 +158,11 @@ SELECT 1
 SELECT 2
 `
 
+const transactionalSQLTemplate = `SET statement_timeout = 0;
+
+SELECT 1;
+`
+
 //------------------------------------------------------------------------------
 
 type MigrationSlice []Migration
@ -267,19 +267,19 @@ func (m *Migrator) CreateGoMigration(
|
|||
return mf, nil
|
||||
}
|
||||
|
||||
// CreateSQLMigrations creates an up and down SQL migration files.
|
||||
func (m *Migrator) CreateSQLMigrations(ctx context.Context, name string) ([]*MigrationFile, error) {
|
||||
// CreateTxSQLMigration creates transactional up and down SQL migration files.
|
||||
func (m *Migrator) CreateTxSQLMigrations(ctx context.Context, name string) ([]*MigrationFile, error) {
|
||||
name, err := m.genMigrationName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
up, err := m.createSQL(ctx, name+".up.sql")
|
||||
up, err := m.createSQL(ctx, name+".up.tx.sql", true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
down, err := m.createSQL(ctx, name+".down.sql")
|
||||
down, err := m.createSQL(ctx, name+".down.tx.sql", true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -287,10 +287,35 @@ func (m *Migrator) CreateSQLMigrations(ctx context.Context, name string) ([]*Mig
|
|||
return []*MigrationFile{up, down}, nil
|
||||
}
|
||||
|
||||
func (m *Migrator) createSQL(ctx context.Context, fname string) (*MigrationFile, error) {
|
||||
// CreateSQLMigrations creates up and down SQL migration files.
|
||||
func (m *Migrator) CreateSQLMigrations(ctx context.Context, name string) ([]*MigrationFile, error) {
|
||||
name, err := m.genMigrationName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
up, err := m.createSQL(ctx, name+".up.sql", false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
down, err := m.createSQL(ctx, name+".down.sql", false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return []*MigrationFile{up, down}, nil
|
||||
}
|
||||
|
||||
func (m *Migrator) createSQL(ctx context.Context, fname string, transactional bool) (*MigrationFile, error) {
|
||||
fpath := filepath.Join(m.migrations.getDirectory(), fname)
|
||||
|
||||
if err := os.WriteFile(fpath, []byte(sqlTemplate), 0o644); err != nil {
|
||||
template := sqlTemplate
|
||||
if transactional {
|
||||
template = transactionalSQLTemplate
|
||||
}
|
||||
|
||||
if err := os.WriteFile(fpath, []byte(template), 0o644); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
|
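A short sketch of how the new helper is meant to be called; the migration name is made up, and NewMigrations/NewMigrator are the existing bun/migrate entry points:

```go
package main

import (
	"context"
	"fmt"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

func createMigration(ctx context.Context, db *bun.DB) error {
	migrations := migrate.NewMigrations()
	migrator := migrate.NewMigrator(db, migrations)

	// Creates <timestamp>_add_users.up.tx.sql and <timestamp>_add_users.down.tx.sql,
	// both pre-filled with the transactional template (SET statement_timeout = 0;).
	files, err := migrator.CreateTxSQLMigrations(ctx, "add_users")
	if err != nil {
		return err
	}
	for _, f := range files {
		fmt.Println("created", f.Name)
	}
	return nil
}
```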
@ -1,6 +1,6 @@
{
	"name": "gobun",
	"version": "1.1.16",
	"version": "1.1.17",
	"main": "index.js",
	"repository": "git@github.com:uptrace/bun.git",
	"author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
@ -332,8 +332,8 @@ func (q *InsertQuery) appendStructValues(
switch {
case isTemplate:
	b = append(b, '?')
case (f.IsPtr && f.HasNilValue(strct)) || (f.NullZero && f.HasZeroValue(strct)):
	if q.db.features.Has(feature.DefaultPlaceholder) {
case q.marshalsToDefault(f, strct):
	if q.db.HasFeature(feature.DefaultPlaceholder) {
		b = append(b, "DEFAULT"...)
	} else if f.SQLDefault != "" {
		b = append(b, f.SQLDefault...)

@ -410,11 +410,9 @@ func (q *InsertQuery) getFields() ([]*schema.Field, error) {
	q.addReturningField(f)
	continue
}
if f.NotNull && f.SQLDefault == "" {
	if (f.IsPtr && f.HasNilValue(strct)) || (f.NullZero && f.HasZeroValue(strct)) {
		q.addReturningField(f)
		continue
	}
if f.NotNull && q.marshalsToDefault(f, strct) {
	q.addReturningField(f)
	continue
}
fields = append(fields, f)
}

@ -422,6 +420,13 @@ func (q *InsertQuery) getFields() ([]*schema.Field, error) {
	return fields, nil
}

// marshalsToDefault checks if the value will be marshaled as DEFAULT or NULL (if DEFAULT placeholder is not supported)
// when appending it to the VALUES clause in place of the given field.
func (q InsertQuery) marshalsToDefault(f *schema.Field, v reflect.Value) bool {
	return (f.IsPtr && f.HasNilValue(v)) ||
		(f.HasZeroValue(v) && (f.NullZero || f.SQLDefault != ""))
}

func (q *InsertQuery) appendFields(
	fmter schema.Formatter, b []byte, fields []*schema.Field,
) []byte {
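The effect of marshalsToDefault, sketched with a hypothetical model; the column names and default value are illustrative only:

```go
package example

// Account is a made-up model: Plan carries both nullzero and an SQL default.
type Account struct {
	ID   int64  `bun:"id,pk,autoincrement"`
	Plan string `bun:"plan,nullzero,default:'free'"`
}

// Inserting a zero-value Plan lets the database fill it in:
//
//	acc := &Account{}
//	_, err := db.NewInsert().Model(acc).Exec(ctx)
//
// On dialects with a DEFAULT placeholder the VALUES clause contains DEFAULT for
// "plan"; otherwise the literal SQL default ('free') is appended. In both cases the
// field is also added to the returning clause, so acc.Plan can be populated after
// the insert on dialects that support RETURNING/OUTPUT.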
@ -29,7 +29,7 @@ func NewMergeQuery(db *DB) *MergeQuery {
		conn: db.DB,
	},
}
if !(q.db.dialect.Name() == dialect.MSSQL || q.db.dialect.Name() == dialect.PG) {
if q.db.dialect.Name() != dialect.MSSQL && q.db.dialect.Name() != dialect.PG {
	q.err = errors.New("bun: merge not supported for current dialect")
}
return q
@ -1,6 +1,7 @@
package bun

import (
	"bytes"
	"context"
	"database/sql"
	"fmt"

@ -19,6 +20,7 @@ type CreateTableQuery struct {

	temp        bool
	ifNotExists bool
	fksFromRel  bool // Create foreign keys captured in table's relations.

	// varchar changes the default length for VARCHAR columns.
	// Because some dialects require that length is always specified for VARCHAR type,

@ -120,21 +122,9 @@ func (q *CreateTableQuery) TableSpace(tablespace string) *CreateTableQuery {
	return q
}

// WithForeignKeys adds a FOREIGN KEY clause for each of the model's existing relations.
func (q *CreateTableQuery) WithForeignKeys() *CreateTableQuery {
	for _, relation := range q.tableModel.Table().Relations {
		if relation.Type == schema.ManyToManyRelation ||
			relation.Type == schema.HasManyRelation {
			continue
		}

		q = q.ForeignKey("(?) REFERENCES ? (?) ? ?",
			Safe(appendColumns(nil, "", relation.BaseFields)),
			relation.JoinTable.SQLName,
			Safe(appendColumns(nil, "", relation.JoinFields)),
			Safe(relation.OnUpdate),
			Safe(relation.OnDelete),
		)
	}
	q.fksFromRel = true
	return q
}

@ -157,7 +147,7 @@ func (q *CreateTableQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []by
	b = append(b, "TEMP "...)
}
b = append(b, "TABLE "...)
if q.ifNotExists && fmter.Dialect().Features().Has(feature.TableNotExists) {
if q.ifNotExists && fmter.HasFeature(feature.TableNotExists) {
	b = append(b, "IF NOT EXISTS "...)
}
b, err = q.appendFirstTable(fmter, b)

@ -178,19 +168,12 @@ func (q *CreateTableQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []by
if field.NotNull {
	b = append(b, " NOT NULL"...)
}
if field.AutoIncrement {
	switch {
	case fmter.Dialect().Features().Has(feature.AutoIncrement):
		b = append(b, " AUTO_INCREMENT"...)
	case fmter.Dialect().Features().Has(feature.Identity):
		b = append(b, " IDENTITY"...)
	}
}
if field.Identity {
	if fmter.Dialect().Features().Has(feature.GeneratedIdentity) {
		b = append(b, " GENERATED BY DEFAULT AS IDENTITY"...)
	}

if (field.Identity && fmter.HasFeature(feature.GeneratedIdentity)) ||
	(field.AutoIncrement && (fmter.HasFeature(feature.AutoIncrement) || fmter.HasFeature(feature.Identity))) {
	b = q.db.dialect.AppendSequence(b, q.table, field)
}

if field.SQLDefault != "" {
	b = append(b, " DEFAULT "...)
	b = append(b, field.SQLDefault...)

@ -210,8 +193,20 @@ func (q *CreateTableQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []by
	}
}

b = q.appendPKConstraint(b, q.table.PKs)
// In SQLite AUTOINCREMENT is only valid for INTEGER PRIMARY KEY columns, so it might be that
// a primary key constraint has already been created in dialect.AppendSequence() call above.
// See sqldialect.Dialect.AppendSequence() for more details.
if len(q.table.PKs) > 0 && !bytes.Contains(b, []byte("PRIMARY KEY")) {
	b = q.appendPKConstraint(b, q.table.PKs)
}
b = q.appendUniqueConstraints(fmter, b)

if q.fksFromRel {
	b, err = q.appendFKConstraintsRel(fmter, b)
	if err != nil {
		return nil, err
	}
}
b, err = q.appendFKConstraints(fmter, b)
if err != nil {
	return nil, err

@ -295,13 +290,38 @@ func (q *CreateTableQuery) appendUniqueConstraint(
	return b
}

// appendFKConstraintsRel appends a FOREIGN KEY clause for each of the model's existing relations.
func (q *CreateTableQuery) appendFKConstraintsRel(fmter schema.Formatter, b []byte) (_ []byte, err error) {
	for _, rel := range q.tableModel.Table().Relations {
		if rel.References() {
			b, err = q.appendFK(fmter, b, schema.QueryWithArgs{
				Query: "(?) REFERENCES ? (?) ? ?",
				Args: []interface{}{
					Safe(appendColumns(nil, "", rel.BaseFields)),
					rel.JoinTable.SQLName,
					Safe(appendColumns(nil, "", rel.JoinFields)),
					Safe(rel.OnUpdate),
					Safe(rel.OnDelete),
				},
			})
			if err != nil {
				return nil, err
			}
		}
	}
	return b, nil
}

func (q *CreateTableQuery) appendFK(fmter schema.Formatter, b []byte, fk schema.QueryWithArgs) (_ []byte, err error) {
	b = append(b, ", FOREIGN KEY "...)
	return fk.AppendQuery(fmter, b)
}

func (q *CreateTableQuery) appendFKConstraints(
	fmter schema.Formatter, b []byte,
) (_ []byte, err error) {
	for _, fk := range q.fks {
		b = append(b, ", FOREIGN KEY "...)
		b, err = fk.AppendQuery(fmter, b)
		if err != nil {
		if b, err = q.appendFK(fmter, b, fk); err != nil {
			return nil, err
		}
	}

@ -309,10 +329,6 @@ func (q *CreateTableQuery) appendFKConstraints(
}

func (q *CreateTableQuery) appendPKConstraint(b []byte, pks []*schema.Field) []byte {
	if len(pks) == 0 {
		return b
	}

	b = append(b, ", PRIMARY KEY ("...)
	b = appendColumns(b, "", pks)
	b = append(b, ")"...)

@ -364,3 +380,12 @@ func (q *CreateTableQuery) afterCreateTableHook(ctx context.Context) error {
	}
	return nil
}

func (q *CreateTableQuery) String() string {
	buf, err := q.AppendQuery(q.db.Formatter(), nil)
	if err != nil {
		panic(err)
	}

	return string(buf)
}
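For reference, the refactored foreign-key path and the new String() method combine like this; Book and Author are hypothetical models, not part of the commit:

```go
package example

import (
	"fmt"

	"github.com/uptrace/bun"
)

type Author struct {
	ID int64 `bun:"id,pk,autoincrement"`
}

type Book struct {
	ID       int64   `bun:"id,pk,autoincrement"`
	AuthorID int64   `bun:"author_id"`
	Author   *Author `bun:"rel:belongs-to,join:author_id=id"`
}

// printDDL shows the generated statement without executing it.
func printDDL(db *bun.DB) {
	q := db.NewCreateTable().
		Model((*Book)(nil)).
		WithForeignKeys().
		IfNotExists()

	// The FOREIGN KEY ("author_id") REFERENCES ... clause is now emitted by
	// appendFKConstraintsRel at query-build time; String() is the helper added in this commit.
	fmt.Println(q.String())
}
```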
@ -20,6 +20,7 @@ type UpdateQuery struct {
	setQuery
	idxHintsQuery

	joins    []joinQuery
	omitZero bool
}

@ -133,6 +134,33 @@ func (q *UpdateQuery) OmitZero() *UpdateQuery {

//------------------------------------------------------------------------------

func (q *UpdateQuery) Join(join string, args ...interface{}) *UpdateQuery {
	q.joins = append(q.joins, joinQuery{
		join: schema.SafeQuery(join, args),
	})
	return q
}

func (q *UpdateQuery) JoinOn(cond string, args ...interface{}) *UpdateQuery {
	return q.joinOn(cond, args, " AND ")
}

func (q *UpdateQuery) JoinOnOr(cond string, args ...interface{}) *UpdateQuery {
	return q.joinOn(cond, args, " OR ")
}

func (q *UpdateQuery) joinOn(cond string, args []interface{}, sep string) *UpdateQuery {
	if len(q.joins) == 0 {
		q.err = errors.New("bun: query has no joins")
		return q
	}
	j := &q.joins[len(q.joins)-1]
	j.on = append(j.on, schema.SafeQueryWithSep(cond, args, sep))
	return q
}

//------------------------------------------------------------------------------

func (q *UpdateQuery) WherePK(cols ...string) *UpdateQuery {
	q.addWhereCols(cols)
	return q

@ -230,6 +258,13 @@ func (q *UpdateQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, e
	}
}

for _, j := range q.joins {
	b, err = j.AppendQuery(fmter, b)
	if err != nil {
		return nil, err
	}
}

if q.hasFeature(feature.Output) && q.hasReturning() {
	b = append(b, " OUTPUT "...)
	b, err = q.appendOutput(fmter, b)
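Usage of the new UpdateQuery Join/JoinOn, sketched against hypothetical tables; whether the resulting UPDATE ... JOIN is valid still depends on the target dialect:

```go
package example

import (
	"context"

	"github.com/uptrace/bun"
)

// archiveOrders uses the new Join/JoinOn on UpdateQuery; table and column names are made up.
func archiveOrders(ctx context.Context, db *bun.DB) error {
	_, err := db.NewUpdate().
		Table("orders").
		Set("status = ?", "archived").
		Join("JOIN customers AS c").
		JoinOn("c.id = orders.customer_id").
		Where("c.archived_at IS NOT NULL").
		Exec(ctx)
	return err
}
```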
@ -31,6 +31,10 @@ type Dialect interface {
	AppendJSON(b, jsonb []byte) []byte
	AppendBool(b []byte, v bool) []byte

	// AppendSequence adds the appropriate instruction for the driver to create a sequence
	// from which (autoincremented) values for the column will be generated.
	AppendSequence(b []byte, t *Table, f *Field) []byte

	// DefaultVarcharLen should be returned for dialects in which specifying VARCHAR length
	// is mandatory in queries that modify the schema (CREATE TABLE / ADD COLUMN, etc).
	// Dialects that do not have such requirement may return 0, which should be interpreted so by the caller.

@ -177,3 +181,7 @@ func (d *nopDialect) IdentQuote() byte {
func (d *nopDialect) DefaultVarcharLen() int {
	return 0
}

func (d *nopDialect) AppendSequence(b []byte, _ *Table, _ *Field) []byte {
	return b
}
@ -30,6 +30,12 @@ type Relation struct {
	M2MJoinFields []*Field
}

// References returns true if the table to which the Relation belongs needs to declare a foreign key constraint to create the relation.
// For other relations, the constraint is created in either the referencing table (1:N, 'has-many' relations) or a mapping table (N:N, 'm2m' relations).
func (r *Relation) References() bool {
	return r.Type == HasOneRelation || r.Type == BelongsToRelation
}

func (r *Relation) String() string {
	return fmt.Sprintf("relation=%s", r.Field.GoName)
}
@ -377,7 +377,6 @@ func (t *Table) newField(f reflect.StructField, prefix string, index []int) *Fie
}
if s, ok := tag.Option("default"); ok {
	field.SQLDefault = s
	field.NullZero = true
}
if s, ok := field.Tag.Option("type"); ok {
	field.UserSQLType = s

@ -477,7 +476,7 @@ func (t *Table) belongsToRelation(field *Field) *Relation {
}

rel := &Relation{
	Type: HasOneRelation,
	Type: BelongsToRelation,
	Field: field,
	JoinTable: joinTable,
}

@ -571,7 +570,7 @@ func (t *Table) hasOneRelation(field *Field) *Relation {

joinTable := t.dialect.Tables().Ref(field.IndirectType)
rel := &Relation{
	Type: BelongsToRelation,
	Type: HasOneRelation,
	Field: field,
	JoinTable: joinTable,
}
@ -2,5 +2,5 @@ package bun
// Version is the current release version.
func Version() string {
	return "1.1.16"
	return "1.1.17"
}
@ -21,6 +21,12 @@ func Open(driverName, dsn string, opts ...Option) (*sql.DB, error) {
func patchDB(db *sql.DB, dsn string, opts ...Option) (*sql.DB, error) {
	dbDriver := db.Driver()

	// Close the db since we are about to open a new one.
	if err := db.Close(); err != nil {
		return nil, err
	}

	d := newDriver(dbDriver, opts)

	if _, ok := dbDriver.(driver.DriverContext); ok {
@ -2,5 +2,5 @@ package otelsql
// Version is the current release version.
func Version() string {
	return "0.2.2"
	return "0.2.3"
}
@ -1,6 +1,30 @@
## [5.4.1](https://github.com/vmihailenco/msgpack/compare/v5.4.0...v5.4.1) (2023-10-26)

### Bug Fixes

* **reflect:** not assignable to type ([edeaedd](https://github.com/vmihailenco/msgpack/commit/edeaeddb2d51868df8c6ff2d8a218b527aeaf5fd))

# [5.4.0](https://github.com/vmihailenco/msgpack/compare/v5.3.6...v5.4.0) (2023-10-01)

## [5.3.6](https://github.com/vmihailenco/msgpack/compare/v5.3.5...v5.3.6) (2023-10-01)

### Features

* allow overwriting time.Time parsing from extID 13 (for NodeJS Date) ([9a6b73b](https://github.com/vmihailenco/msgpack/commit/9a6b73b3588fd962d568715f4375e24b089f7066))
* apply omitEmptyFlag to empty structs ([e5f8d03](https://github.com/vmihailenco/msgpack/commit/e5f8d03c0a1dd9cc571d648cd610305139078de5))
* support sorted keys for map[string]bool ([690c1fa](https://github.com/vmihailenco/msgpack/commit/690c1fab9814fab4842295ea986111f49850d9a4))

## [5.3.5](https://github.com/vmihailenco/msgpack/compare/v5.3.4...v5.3.5) (2021-10-22)

- Allow decoding `nil` code as boolean false.

## v5
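One of the listed features, sorted keys for map[string]bool, in a minimal sketch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v5"
)

func main() {
	var buf bytes.Buffer
	enc := msgpack.NewEncoder(&buf)
	// SetSortMapKeys now also applies to map[string]bool (in addition to
	// map[string]string and map[string]interface{}), making the output deterministic.
	enc.SetSortMapKeys(true)
	if err := enc.Encode(map[string]bool{"beta": true, "alpha": false}); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes())
}
```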
@ -5,19 +5,18 @@
|
|||
[![Documentation](https://img.shields.io/badge/msgpack-documentation-informational)](https://msgpack.uptrace.dev/)
|
||||
[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
|
||||
|
||||
> :heart:
|
||||
> [**Uptrace.dev** - All-in-one tool to optimize performance and monitor errors & logs](https://uptrace.dev/?utm_source=gh-msgpack&utm_campaign=gh-msgpack-var2)
|
||||
> msgpack is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
|
||||
> Uptrace is an [open source APM](https://uptrace.dev/get/open-source-apm.html) and blazingly fast
|
||||
> [distributed tracing tool](https://get.uptrace.dev/compare/distributed-tracing-tools.html) powered
|
||||
> by OpenTelemetry and ClickHouse. Give it a star as well!
|
||||
|
||||
## Resources
|
||||
|
||||
- Join [Discord](https://discord.gg/rWtp5Aj) to ask questions.
|
||||
- [Documentation](https://msgpack.uptrace.dev)
|
||||
- [Chat](https://discord.gg/rWtp5Aj)
|
||||
- [Reference](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5)
|
||||
- [Examples](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#pkg-examples)
|
||||
|
||||
Other projects you may like:
|
||||
|
||||
- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
|
||||
- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
|
||||
|
||||
## Features
|
||||
|
||||
- Primitives, arrays, maps, structs, time.Time and interface{}.
|
||||
|
@ -84,3 +83,18 @@ func ExampleMarshal() {
|
|||
// Output: bar
|
||||
}
|
||||
```
|
||||
|
||||
## See also
|
||||
|
||||
- [Golang ORM](https://github.com/uptrace/bun) for PostgreSQL, MySQL, MSSQL, and SQLite
|
||||
- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/)
|
||||
- [Golang HTTP router](https://github.com/uptrace/bunrouter)
|
||||
- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
|
||||
|
||||
## Contributors
|
||||
|
||||
Thanks to all the people who already contributed!
|
||||
|
||||
<a href="https://github.com/vmihailenco/msgpack/graphs/contributors">
|
||||
<img src="https://contributors-img.web.app/image?repo=vmihailenco/msgpack" />
|
||||
</a>
|
||||
|
|
|
@ -14,14 +14,16 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
looseInterfaceDecodingFlag uint32 = 1 << iota
|
||||
disallowUnknownFieldsFlag
|
||||
bytesAllocLimit = 1 << 20 // 1mb
|
||||
sliceAllocLimit = 1e6 // 1m elements
|
||||
maxMapSize = 1e6 // 1m elements
|
||||
)
|
||||
|
||||
const (
|
||||
bytesAllocLimit = 1e6 // 1mb
|
||||
sliceAllocLimit = 1e4
|
||||
maxMapSize = 1e6
|
||||
looseInterfaceDecodingFlag uint32 = 1 << iota
|
||||
disallowUnknownFieldsFlag
|
||||
usePreallocateValues
|
||||
disableAllocLimitFlag
|
||||
)
|
||||
|
||||
type bufReader interface {
|
||||
|
@ -53,7 +55,7 @@ func PutDecoder(dec *Decoder) {
|
|||
// in the value pointed to by v.
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
dec := GetDecoder()
|
||||
|
||||
dec.UsePreallocateValues(true)
|
||||
dec.Reset(bytes.NewReader(data))
|
||||
err := dec.Decode(v)
|
||||
|
||||
|
@ -64,16 +66,14 @@ func Unmarshal(data []byte, v interface{}) error {
|
|||
|
||||
// A Decoder reads and decodes MessagePack values from an input stream.
|
||||
type Decoder struct {
|
||||
r io.Reader
|
||||
s io.ByteScanner
|
||||
buf []byte
|
||||
|
||||
rec []byte // accumulates read data if not nil
|
||||
|
||||
r io.Reader
|
||||
s io.ByteScanner
|
||||
mapDecoder func(*Decoder) (interface{}, error)
|
||||
structTag string
|
||||
buf []byte
|
||||
rec []byte
|
||||
dict []string
|
||||
flags uint32
|
||||
structTag string
|
||||
mapDecoder func(*Decoder) (interface{}, error)
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder that reads from r.
|
||||
|
@ -95,10 +95,9 @@ func (d *Decoder) Reset(r io.Reader) {
|
|||
|
||||
// ResetDict is like Reset, but also resets the dict.
|
||||
func (d *Decoder) ResetDict(r io.Reader, dict []string) {
|
||||
d.resetReader(r)
|
||||
d.ResetReader(r)
|
||||
d.flags = 0
|
||||
d.structTag = ""
|
||||
d.mapDecoder = nil
|
||||
d.dict = dict
|
||||
}
|
||||
|
||||
|
@ -110,10 +109,16 @@ func (d *Decoder) WithDict(dict []string, fn func(*Decoder) error) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (d *Decoder) resetReader(r io.Reader) {
|
||||
func (d *Decoder) ResetReader(r io.Reader) {
|
||||
d.mapDecoder = nil
|
||||
d.dict = nil
|
||||
|
||||
if br, ok := r.(bufReader); ok {
|
||||
d.r = br
|
||||
d.s = br
|
||||
} else if r == nil {
|
||||
d.r = nil
|
||||
d.s = nil
|
||||
} else {
|
||||
br := bufio.NewReader(r)
|
||||
d.r = br
|
||||
|
@ -161,6 +166,24 @@ func (d *Decoder) UseInternedStrings(on bool) {
|
|||
}
|
||||
}
|
||||
|
||||
// UsePreallocateValues enables preallocating values in chunks
|
||||
func (d *Decoder) UsePreallocateValues(on bool) {
|
||||
if on {
|
||||
d.flags |= usePreallocateValues
|
||||
} else {
|
||||
d.flags &= ^usePreallocateValues
|
||||
}
|
||||
}
|
||||
|
||||
// DisableAllocLimit enables fully allocating slices/maps when the size is known
|
||||
func (d *Decoder) DisableAllocLimit(on bool) {
|
||||
if on {
|
||||
d.flags |= disableAllocLimitFlag
|
||||
} else {
|
||||
d.flags &= ^disableAllocLimitFlag
|
||||
}
|
||||
}
|
||||
|
||||
// Buffered returns a reader of the data remaining in the Decoder's buffer.
|
||||
// The reader is valid until the next call to Decode.
|
||||
func (d *Decoder) Buffered() io.Reader {
|
||||
|
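The two new decoder knobs introduced above, in a short sketch; the payload argument is a placeholder for trusted input:

```go
package example

import (
	"bytes"

	"github.com/vmihailenco/msgpack/v5"
)

// decodeLarge enables both new flags before decoding.
func decodeLarge(payload []byte) (map[string]interface{}, error) {
	dec := msgpack.NewDecoder(bytes.NewReader(payload))
	dec.UsePreallocateValues(true) // hand out reflect values from a per-type cache, in chunks
	dec.DisableAllocLimit(true)    // trust encoded sizes and allocate maps/slices in full

	var out map[string]interface{}
	if err := dec.Decode(&out); err != nil {
		return nil, err
	}
	return out, nil
}
```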
@ -603,7 +626,11 @@ func (d *Decoder) readFull(b []byte) error {
|
|||
|
||||
func (d *Decoder) readN(n int) ([]byte, error) {
|
||||
var err error
|
||||
d.buf, err = readN(d.r, d.buf, n)
|
||||
if d.flags&disableAllocLimitFlag != 0 {
|
||||
d.buf, err = readN(d.r, d.buf, n)
|
||||
} else {
|
||||
d.buf, err = readNGrow(d.r, d.buf, n)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -615,6 +642,24 @@ func (d *Decoder) readN(n int) ([]byte, error) {
|
|||
}
|
||||
|
||||
func readN(r io.Reader, b []byte, n int) ([]byte, error) {
|
||||
if b == nil {
|
||||
if n == 0 {
|
||||
return make([]byte, 0), nil
|
||||
}
|
||||
b = make([]byte, 0, n)
|
||||
}
|
||||
|
||||
if n > cap(b) {
|
||||
b = append(b, make([]byte, n-len(b))...)
|
||||
} else if n <= cap(b) {
|
||||
b = b[:n]
|
||||
}
|
||||
|
||||
_, err := io.ReadFull(r, b)
|
||||
return b, err
|
||||
}
|
||||
|
||||
func readNGrow(r io.Reader, b []byte, n int) ([]byte, error) {
|
||||
if b == nil {
|
||||
if n == 0 {
|
||||
return make([]byte, 0), nil
|
||||
|
|
|
@ -13,6 +13,8 @@ var errArrayStruct = errors.New("msgpack: number of fields in array-encoded stru
|
|||
var (
|
||||
mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil))
|
||||
mapStringStringType = mapStringStringPtrType.Elem()
|
||||
mapStringBoolPtrType = reflect.TypeOf((*map[string]bool)(nil))
|
||||
mapStringBoolType = mapStringBoolPtrType.Elem()
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -33,7 +35,11 @@ func decodeMapValue(d *Decoder, v reflect.Value) error {
|
|||
}
|
||||
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.MakeMap(typ))
|
||||
ln := n
|
||||
if d.flags&disableAllocLimitFlag == 0 {
|
||||
ln = min(ln, maxMapSize)
|
||||
}
|
||||
v.Set(reflect.MakeMapWithSize(typ, ln))
|
||||
}
|
||||
if n == 0 {
|
||||
return nil
|
||||
|
@ -104,7 +110,11 @@ func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error {
|
|||
|
||||
m := *ptr
|
||||
if m == nil {
|
||||
*ptr = make(map[string]string, min(size, maxMapSize))
|
||||
ln := size
|
||||
if d.flags&disableAllocLimitFlag == 0 {
|
||||
ln = min(size, maxMapSize)
|
||||
}
|
||||
*ptr = make(map[string]string, ln)
|
||||
m = *ptr
|
||||
}
|
||||
|
||||
|
@ -147,7 +157,7 @@ func (d *Decoder) DecodeMap() (map[string]interface{}, error) {
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
m := make(map[string]interface{}, min(n, maxMapSize))
|
||||
m := make(map[string]interface{}, n)
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
mk, err := d.DecodeString()
|
||||
|
@ -174,7 +184,7 @@ func (d *Decoder) DecodeUntypedMap() (map[interface{}]interface{}, error) {
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
m := make(map[interface{}]interface{}, min(n, maxMapSize))
|
||||
m := make(map[interface{}]interface{}, n)
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
mk, err := d.decodeInterfaceCond()
|
||||
|
@ -222,7 +232,13 @@ func (d *Decoder) DecodeTypedMap() (interface{}, error) {
|
|||
}
|
||||
|
||||
mapType := reflect.MapOf(keyType, valueType)
|
||||
mapValue := reflect.MakeMap(mapType)
|
||||
|
||||
ln := n
|
||||
if d.flags&disableAllocLimitFlag == 0 {
|
||||
ln = min(ln, maxMapSize)
|
||||
}
|
||||
|
||||
mapValue := reflect.MakeMapWithSize(mapType, ln)
|
||||
mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value))
|
||||
|
||||
n--
|
||||
|
@ -234,17 +250,18 @@ func (d *Decoder) DecodeTypedMap() (interface{}, error) {
|
|||
}
|
||||
|
||||
func (d *Decoder) decodeTypedMapValue(v reflect.Value, n int) error {
|
||||
typ := v.Type()
|
||||
keyType := typ.Key()
|
||||
valueType := typ.Elem()
|
||||
|
||||
var (
|
||||
typ = v.Type()
|
||||
keyType = typ.Key()
|
||||
valueType = typ.Elem()
|
||||
)
|
||||
for i := 0; i < n; i++ {
|
||||
mk := reflect.New(keyType).Elem()
|
||||
mk := d.newValue(keyType).Elem()
|
||||
if err := d.DecodeValue(mk); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mv := reflect.New(valueType).Elem()
|
||||
mv := d.newValue(valueType).Elem()
|
||||
if err := d.DecodeValue(mv); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -11,9 +11,8 @@ import (
|
|||
type queryResult struct {
|
||||
query string
|
||||
key string
|
||||
values []interface{}
|
||||
hasAsterisk bool
|
||||
|
||||
values []interface{}
|
||||
}
|
||||
|
||||
func (q *queryResult) nextKey() {
|
||||
|
|
|
@ -49,7 +49,7 @@ func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
ss := makeStrings(*ptr, n)
|
||||
ss := makeStrings(*ptr, n, d.flags&disableAllocLimitFlag != 0)
|
||||
for i := 0; i < n; i++ {
|
||||
s, err := d.DecodeString()
|
||||
if err != nil {
|
||||
|
@ -62,8 +62,8 @@ func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func makeStrings(s []string, n int) []string {
|
||||
if n > sliceAllocLimit {
|
||||
func makeStrings(s []string, n int, noLimit bool) []string {
|
||||
if !noLimit && n > sliceAllocLimit {
|
||||
n = sliceAllocLimit
|
||||
}
|
||||
|
||||
|
@ -101,10 +101,17 @@ func decodeSliceValue(d *Decoder, v reflect.Value) error {
|
|||
v.Set(v.Slice(0, v.Cap()))
|
||||
}
|
||||
|
||||
noLimit := d.flags&disableAllocLimitFlag != 1
|
||||
|
||||
if noLimit && n > v.Len() {
|
||||
v.Set(growSliceValue(v, n, noLimit))
|
||||
}
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
if i >= v.Len() {
|
||||
v.Set(growSliceValue(v, n))
|
||||
if !noLimit && i >= v.Len() {
|
||||
v.Set(growSliceValue(v, n, noLimit))
|
||||
}
|
||||
|
||||
elem := v.Index(i)
|
||||
if err := d.DecodeValue(elem); err != nil {
|
||||
return err
|
||||
|
@ -114,9 +121,9 @@ func decodeSliceValue(d *Decoder, v reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func growSliceValue(v reflect.Value, n int) reflect.Value {
|
||||
func growSliceValue(v reflect.Value, n int, noLimit bool) reflect.Value {
|
||||
diff := n - v.Len()
|
||||
if diff > sliceAllocLimit {
|
||||
if !noLimit && diff > sliceAllocLimit {
|
||||
diff = sliceAllocLimit
|
||||
}
|
||||
v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff))
|
||||
|
@ -163,7 +170,7 @@ func (d *Decoder) decodeSlice(c byte) ([]interface{}, error) {
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
s := make([]interface{}, 0, min(n, sliceAllocLimit))
|
||||
s := make([]interface{}, 0, n)
|
||||
for i := 0; i < n; i++ {
|
||||
v, err := d.decodeInterfaceCond()
|
||||
if err != nil {
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
package msgpack
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var cachedValues struct {
|
||||
m map[reflect.Type]chan reflect.Value
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func cachedValue(t reflect.Type) reflect.Value {
|
||||
cachedValues.RLock()
|
||||
ch := cachedValues.m[t]
|
||||
cachedValues.RUnlock()
|
||||
if ch != nil {
|
||||
return <-ch
|
||||
}
|
||||
|
||||
cachedValues.Lock()
|
||||
defer cachedValues.Unlock()
|
||||
if ch = cachedValues.m[t]; ch != nil {
|
||||
return <-ch
|
||||
}
|
||||
|
||||
ch = make(chan reflect.Value, 256)
|
||||
go func() {
|
||||
for {
|
||||
ch <- reflect.New(t)
|
||||
}
|
||||
}()
|
||||
if cachedValues.m == nil {
|
||||
cachedValues.m = make(map[reflect.Type]chan reflect.Value, 8)
|
||||
}
|
||||
cachedValues.m[t] = ch
|
||||
return <-ch
|
||||
}
|
||||
|
||||
func (d *Decoder) newValue(t reflect.Type) reflect.Value {
|
||||
if d.flags&usePreallocateValues == 0 {
|
||||
return reflect.New(t)
|
||||
}
|
||||
|
||||
return cachedValue(t)
|
||||
}
|
|
@ -10,6 +10,7 @@ import (
|
|||
var (
|
||||
interfaceType = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
stringType = reflect.TypeOf((*string)(nil)).Elem()
|
||||
boolType = reflect.TypeOf((*bool)(nil)).Elem()
|
||||
)
|
||||
|
||||
var valueDecoders []decoderFunc
|
||||
|
@ -127,12 +128,12 @@ func ptrValueDecoder(typ reflect.Type) decoderFunc {
|
|||
return func(d *Decoder, v reflect.Value) error {
|
||||
if d.hasNilCode() {
|
||||
if !v.IsNil() {
|
||||
v.Set(reflect.Zero(v.Type()))
|
||||
v.Set(d.newValue(typ).Elem())
|
||||
}
|
||||
return d.DecodeNil()
|
||||
}
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
v.Set(d.newValue(typ.Elem()))
|
||||
}
|
||||
return decoder(d, v.Elem())
|
||||
}
|
||||
|
@ -154,7 +155,7 @@ func nilAwareDecoder(typ reflect.Type, fn decoderFunc) decoderFunc {
|
|||
return d.decodeNilValue(v)
|
||||
}
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
v.Set(d.newValue(typ.Elem()))
|
||||
}
|
||||
return fn(d, v)
|
||||
}
|
||||
|
|
|
@ -75,15 +75,12 @@ func Marshal(v interface{}) ([]byte, error) {
|
|||
}
|
||||
|
||||
type Encoder struct {
|
||||
w writer
|
||||
|
||||
buf []byte
|
||||
timeBuf []byte
|
||||
|
||||
dict map[string]int
|
||||
|
||||
flags uint32
|
||||
w writer
|
||||
dict map[string]int
|
||||
structTag string
|
||||
buf []byte
|
||||
timeBuf []byte
|
||||
flags uint32
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w.
|
||||
|
@ -107,7 +104,7 @@ func (e *Encoder) Reset(w io.Writer) {
|
|||
|
||||
// ResetDict is like Reset, but also resets the dict.
|
||||
func (e *Encoder) ResetDict(w io.Writer, dict map[string]int) {
|
||||
e.resetWriter(w)
|
||||
e.ResetWriter(w)
|
||||
e.flags = 0
|
||||
e.structTag = ""
|
||||
e.dict = dict
|
||||
|
@ -121,9 +118,12 @@ func (e *Encoder) WithDict(dict map[string]int, fn func(*Encoder) error) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (e *Encoder) resetWriter(w io.Writer) {
|
||||
func (e *Encoder) ResetWriter(w io.Writer) {
|
||||
e.dict = nil
|
||||
if bw, ok := w.(writer); ok {
|
||||
e.w = bw
|
||||
} else if w == nil {
|
||||
e.w = nil
|
||||
} else {
|
||||
e.w = newByteWriter(w)
|
||||
}
|
||||
|
@ -132,6 +132,7 @@ func (e *Encoder) resetWriter(w io.Writer) {
|
|||
// SetSortMapKeys causes the Encoder to encode map keys in increasing order.
|
||||
// Supported map types are:
|
||||
// - map[string]string
|
||||
// - map[string]bool
|
||||
// - map[string]interface{}
|
||||
func (e *Encoder) SetSortMapKeys(on bool) *Encoder {
|
||||
if on {
|
||||
|
|
|
@ -30,6 +30,32 @@ func encodeMapValue(e *Encoder, v reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func encodeMapStringBoolValue(e *Encoder, v reflect.Value) error {
|
||||
if v.IsNil() {
|
||||
return e.EncodeNil()
|
||||
}
|
||||
|
||||
if err := e.EncodeMapLen(v.Len()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m := v.Convert(mapStringBoolType).Interface().(map[string]bool)
|
||||
if e.flags&sortMapKeysFlag != 0 {
|
||||
return e.encodeSortedMapStringBool(m)
|
||||
}
|
||||
|
||||
for mk, mv := range m {
|
||||
if err := e.EncodeString(mk); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := e.EncodeBool(mv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func encodeMapStringStringValue(e *Encoder, v reflect.Value) error {
|
||||
if v.IsNil() {
|
||||
return e.EncodeNil()
|
||||
|
@ -113,6 +139,26 @@ func (e *Encoder) EncodeMapSorted(m map[string]interface{}) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeSortedMapStringBool(m map[string]bool) error {
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
err := e.EncodeString(k)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = e.EncodeBool(m[k]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeSortedMapStringString(m map[string]string) error {
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
|
@ -148,7 +194,7 @@ func encodeStructValue(e *Encoder, strct reflect.Value) error {
|
|||
if e.flags&arrayEncodedStructsFlag != 0 || structFields.AsArray {
|
||||
return encodeStructValueAsArray(e, strct, structFields.List)
|
||||
}
|
||||
fields := structFields.OmitEmpty(strct, e.flags&omitEmptyFlag != 0)
|
||||
fields := structFields.OmitEmpty(e, strct)
|
||||
|
||||
if err := e.EncodeMapLen(len(fields)); err != nil {
|
||||
return err
|
||||
|
|
|
@ -111,6 +111,8 @@ func _getEncoder(typ reflect.Type) encoderFunc {
|
|||
switch typ.Elem() {
|
||||
case stringType:
|
||||
return encodeMapStringStringValue
|
||||
case boolType:
|
||||
return encodeMapStringBoolValue
|
||||
case interfaceType:
|
||||
return encodeMapStringInterfaceValue
|
||||
}
|
||||
|
@ -198,6 +200,13 @@ func nilable(kind reflect.Kind) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func nilableType(t reflect.Type) bool {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
return nilable(t.Kind())
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
func marshalBinaryValueAddr(e *Encoder, v reflect.Value) error {
|
||||
|
|
|
@ -96,7 +96,7 @@ func makeExtEncoder(
|
|||
func makeExtEncoderAddr(extEncoder encoderFunc) encoderFunc {
|
||||
return func(e *Encoder, v reflect.Value) error {
|
||||
if !v.CanAddr() {
|
||||
return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface())
|
||||
return fmt.Errorf("msgpack: EncodeExt(nonaddressable %T)", v.Interface())
|
||||
}
|
||||
return extEncoder(e, v.Addr())
|
||||
}
|
||||
|
@ -157,7 +157,7 @@ func makeExtDecoder(
|
|||
func makeExtDecoderAddr(extDecoder decoderFunc) decoderFunc {
|
||||
return func(d *Decoder, v reflect.Value) error {
|
||||
if !v.CanAddr() {
|
||||
return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface())
|
||||
return fmt.Errorf("msgpack: DecodeExt(nonaddressable %T)", v.Interface())
|
||||
}
|
||||
return extDecoder(d, v.Addr())
|
||||
}
|
||||
|
@ -254,9 +254,9 @@ func (d *Decoder) decodeInterfaceExt(c byte) (interface{}, error) {
|
|||
return nil, fmt.Errorf("msgpack: unknown ext id=%d", extID)
|
||||
}
|
||||
|
||||
v := reflect.New(info.Type).Elem()
|
||||
v := d.newValue(info.Type).Elem()
|
||||
if nilable(v.Kind()) && v.IsNil() {
|
||||
v.Set(reflect.New(info.Type.Elem()))
|
||||
v.Set(d.newValue(info.Type.Elem()))
|
||||
}
|
||||
|
||||
if err := info.Decoder(d, v, extLen); err != nil {
|
||||
|
|
|
@ -57,18 +57,16 @@ func encodeInternedStringValue(e *Encoder, v reflect.Value) error {
|
|||
|
||||
func (e *Encoder) encodeInternedString(s string, intern bool) error {
|
||||
// Interned string takes at least 3 bytes. Plain string 1 byte + string len.
|
||||
if len(s) >= minInternedStringLen {
|
||||
if idx, ok := e.dict[s]; ok {
|
||||
return e.encodeInternedStringIndex(idx)
|
||||
}
|
||||
if idx, ok := e.dict[s]; ok {
|
||||
return e.encodeInternedStringIndex(idx)
|
||||
}
|
||||
|
||||
if intern && len(e.dict) < maxDictLen {
|
||||
if e.dict == nil {
|
||||
e.dict = make(map[string]int)
|
||||
}
|
||||
idx := len(e.dict)
|
||||
e.dict[s] = idx
|
||||
if intern && len(s) >= minInternedStringLen && len(e.dict) < maxDictLen {
|
||||
if e.dict == nil {
|
||||
e.dict = make(map[string]int)
|
||||
}
|
||||
idx := len(e.dict)
|
||||
e.dict[s] = idx
|
||||
}
|
||||
|
||||
return e.encodeNormalString(s)
|
||||
|
|
|
@ -43,8 +43,8 @@ func (m *RawMessage) DecodeMsgpack(dec *Decoder) error {
|
|||
//------------------------------------------------------------------------------
|
||||
|
||||
type unexpectedCodeError struct {
|
||||
code byte
|
||||
hint string
|
||||
code byte
|
||||
}
|
||||
|
||||
func (err unexpectedCodeError) Error() string {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
{
|
||||
"name": "msgpack",
|
||||
"version": "5.3.5"
|
||||
"version": "5.4.1"
|
||||
}
|
||||
|
|
|
@ -26,6 +26,11 @@ func timeDecoder(d *Decoder, v reflect.Value, extLen int) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if tm.IsZero() {
|
||||
// Zero time does not have timezone information.
|
||||
tm = tm.UTC()
|
||||
}
|
||||
|
||||
ptr := v.Addr().Interface().(*time.Time)
|
||||
*ptr = tm
|
||||
|
||||
|
@ -103,7 +108,8 @@ func (d *Decoder) DecodeTime() (time.Time, error) {
|
|||
return time.Time{}, err
|
||||
}
|
||||
|
||||
if extID != timeExtID {
|
||||
// NodeJS seems to use extID 13.
|
||||
if extID != timeExtID && extID != 13 {
|
||||
return time.Time{}, fmt.Errorf("msgpack: invalid time ext id=%d", extID)
|
||||
}
|
||||
|
||||
|
|
|
@ -66,8 +66,8 @@ type structCache struct {
|
|||
}
|
||||
|
||||
type structCacheKey struct {
|
||||
tag string
|
||||
typ reflect.Type
|
||||
tag string
|
||||
}
|
||||
|
||||
func newStructCache() *structCache {
|
||||
|
@ -90,19 +90,20 @@ func (m *structCache) Fields(typ reflect.Type, tag string) *fields {
|
|||
//------------------------------------------------------------------------------
|
||||
|
||||
type field struct {
|
||||
encoder encoderFunc
|
||||
decoder decoderFunc
|
||||
name string
|
||||
index []int
|
||||
omitEmpty bool
|
||||
encoder encoderFunc
|
||||
decoder decoderFunc
|
||||
}
|
||||
|
||||
func (f *field) Omit(strct reflect.Value, forced bool) bool {
|
||||
func (f *field) Omit(e *Encoder, strct reflect.Value) bool {
|
||||
v, ok := fieldByIndex(strct, f.index)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
return (f.omitEmpty || forced) && isEmptyValue(v)
|
||||
forced := e.flags&omitEmptyFlag != 0
|
||||
return (f.omitEmpty || forced) && e.isEmptyValue(v)
|
||||
}
|
||||
|
||||
func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error {
|
||||
|
@ -152,7 +153,8 @@ func (fs *fields) warnIfFieldExists(name string) {
|
|||
}
|
||||
}
|
||||
|
||||
func (fs *fields) OmitEmpty(strct reflect.Value, forced bool) []*field {
|
||||
func (fs *fields) OmitEmpty(e *Encoder, strct reflect.Value) []*field {
|
||||
forced := e.flags&omitEmptyFlag != 0
|
||||
if !fs.hasOmitEmpty && !forced {
|
||||
return fs.List
|
||||
}
|
||||
|
@ -160,7 +162,7 @@ func (fs *fields) OmitEmpty(strct reflect.Value, forced bool) []*field {
|
|||
fields := make([]*field, 0, len(fs.List))
|
||||
|
||||
for _, f := range fs.List {
|
||||
if !f.Omit(strct, forced) {
|
||||
if !f.Omit(e, strct) {
|
||||
fields = append(fields, f)
|
||||
}
|
||||
}
|
||||
|
@ -317,7 +319,7 @@ type isZeroer interface {
|
|||
IsZero() bool
|
||||
}
|
||||
|
||||
func isEmptyValue(v reflect.Value) bool {
|
||||
func (e *Encoder) isEmptyValue(v reflect.Value) bool {
|
||||
kind := v.Kind()
|
||||
|
||||
for kind == reflect.Interface {
|
||||
|
@ -335,6 +337,10 @@ func isEmptyValue(v reflect.Value) bool {
|
|||
switch kind {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Struct:
|
||||
structFields := structs.Fields(v.Type(), e.structTag)
|
||||
fields := structFields.OmitEmpty(e, v)
|
||||
return len(fields) == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
|
@ -399,7 +405,7 @@ func indirectNil(v reflect.Value) (reflect.Value, bool) {
|
|||
if elemType.Kind() != reflect.Struct {
|
||||
return v, false
|
||||
}
|
||||
v.Set(reflect.New(elemType))
|
||||
v.Set(cachedValue(elemType))
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
|
|
|
@ -2,5 +2,5 @@ package msgpack
|
|||
|
||||
// Version is the current release version.
|
||||
func Version() string {
|
||||
return "5.3.5"
|
||||
return "5.4.1"
|
||||
}
|
||||
|
|
|
@ -214,7 +214,7 @@ github.com/go-fed/httpsig
|
|||
github.com/go-jose/go-jose/v3
|
||||
github.com/go-jose/go-jose/v3/cipher
|
||||
github.com/go-jose/go-jose/v3/json
|
||||
# github.com/go-logr/logr v1.3.0
|
||||
# github.com/go-logr/logr v1.4.1
|
||||
## explicit; go 1.18
|
||||
github.com/go-logr/logr
|
||||
github.com/go-logr/logr/funcr
|
||||
|
@ -366,7 +366,7 @@ github.com/leodido/go-urn
|
|||
# github.com/magiconair/properties v1.8.7
|
||||
## explicit; go 1.19
|
||||
github.com/magiconair/properties
|
||||
# github.com/mattn/go-isatty v0.0.19
|
||||
# github.com/mattn/go-isatty v0.0.20
|
||||
## explicit; go 1.15
|
||||
github.com/mattn/go-isatty
|
||||
# github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0
|
||||
|
@ -719,7 +719,7 @@ github.com/ugorji/go/codec
|
|||
github.com/ulule/limiter/v3
|
||||
github.com/ulule/limiter/v3/drivers/store/common
|
||||
github.com/ulule/limiter/v3/drivers/store/memory
|
||||
# github.com/uptrace/bun v1.1.16
|
||||
# github.com/uptrace/bun v1.1.17
|
||||
## explicit; go 1.19
|
||||
github.com/uptrace/bun
|
||||
github.com/uptrace/bun/dialect
|
||||
|
@ -731,20 +731,20 @@ github.com/uptrace/bun/internal/parser
|
|||
github.com/uptrace/bun/internal/tagparser
|
||||
github.com/uptrace/bun/migrate
|
||||
github.com/uptrace/bun/schema
|
||||
# github.com/uptrace/bun/dialect/pgdialect v1.1.16
|
||||
# github.com/uptrace/bun/dialect/pgdialect v1.1.17
|
||||
## explicit; go 1.19
|
||||
github.com/uptrace/bun/dialect/pgdialect
|
||||
# github.com/uptrace/bun/dialect/sqlitedialect v1.1.16
|
||||
# github.com/uptrace/bun/dialect/sqlitedialect v1.1.17
|
||||
## explicit; go 1.19
|
||||
github.com/uptrace/bun/dialect/sqlitedialect
|
||||
# github.com/uptrace/bun/extra/bunotel v1.1.16
|
||||
# github.com/uptrace/bun/extra/bunotel v1.1.17
|
||||
## explicit; go 1.19
|
||||
github.com/uptrace/bun/extra/bunotel
|
||||
# github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.2
|
||||
# github.com/uptrace/opentelemetry-go-extra/otelsql v0.2.3
|
||||
## explicit; go 1.18
|
||||
github.com/uptrace/opentelemetry-go-extra/otelsql
|
||||
# github.com/vmihailenco/msgpack/v5 v5.3.5
|
||||
## explicit; go 1.11
|
||||
# github.com/vmihailenco/msgpack/v5 v5.4.1
|
||||
## explicit; go 1.19
|
||||
github.com/vmihailenco/msgpack/v5
|
||||
github.com/vmihailenco/msgpack/v5/msgpcode
|
||||
# github.com/vmihailenco/tagparser/v2 v2.0.0
|