So Go 1.21 gets a new package: log/slog for structured logging.
Even though there's a single package, it is actually multiple things:
a frontend API (log/slog.Logger, plus a log compatible logger),
a backend interface (log/slog.Handler),
and backend implementations (log/slog.JSONHandler and log/slog.TextHandler).
Of these, I think slog.Handler is the most important.
You might not like slog.Logger
for various reasons,
like it not being context-first, or allowing unstructured ("sugared") attributes,
and you might need features not available in the builtin handlers,
like special-case handling of particular keys, or just a different output format.
This leaves slog.Handler
as the standard interchange between your favorite
frontend API and backend API.
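For a sense of how the pieces plug together, here's a minimal sketch using only the standard library: the slog.Logger frontend driving the builtin JSONHandler backend. Swapping in a custom handler (like the one below) is a one-line change.

package main

import (
	"log/slog"
	"os"
)

func main() {
	// any slog.Handler can sit here: the builtin JSONHandler,
	// or your own implementation
	var h slog.Handler = slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo})
	logger := slog.New(h)
	logger.Info("hello", "user", "gopher")
}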
Anyway, I had need of a custom implementation.
But one thing tripped me up: "If a group has no Attrs (even if it has a non-empty key), ignore it."
For JSON (or any other format with nested/deduped prefixes), this wasn't as easy to do.
At first, I thought this required building up a tree, doing a depth-first walk to prune out empty groups, then writing the result.
Later, I realized I could instead optimistically write the keys, and roll back as needed if there ended up being no attributes in the group.
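To make the rule concrete, here's what it demands of the output; the builtin JSONHandler follows the same rule, so it's used for the illustration (time values elided):

logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))

logger.WithGroup("request").Info("done")
// {"time":"...","level":"INFO","msg":"done"}
// no "request" key: the group ended up with no attrs

logger.WithGroup("request").Info("done", "status", 200)
// {"time":"...","level":"INFO","msg":"done","request":{"status":200}}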
Below is my implementation; modify it as appropriate to handle your special needs.
package jsonlog

import (
	"context"
	"encoding/json"
	"io"
	"log/slog"
	"slices"
	"strconv"
	"sync"
	"time"
	"unicode/utf8"

	"go.opentelemetry.io/otel/trace"
)

const (
	// magic numbers to reduce number of slice resizes
	// slog holds 5 attrs
	stateBufferSize = 1024

	// used by appendString
	hex = "0123456789abcdef"
)

var (

	// shared
	globalSep = []byte(",")

	// assert it is a handler
	_ slog.Handler = new(handler)

	// reduce allocations in steady state
	pool = &sync.Pool{
		New: func() any {
			s := make([]byte, 0, stateBufferSize)
			return &s
		},
	}

	// used by appendString
	safeSet = [utf8.RuneSelf]bool{
		' ':  true,
		'!':  true,
		'"':  false,
		'#':  true,
		'$':  true,
		'%':  true,
		'&':  true,
		'\'': true,
		'(':  true,
		')':  true,
		'*':  true,
		'+':  true,
		',':  true,
		'-':  true,
		'.':  true,
		'/':  true,
		'0':  true,
		'1':  true,
		'2':  true,
		'3':  true,
		'4':  true,
		'5':  true,
		'6':  true,
		'7':  true,
		'8':  true,
		'9':  true,
		':':  true,
		';':  true,
		'<':  true,
		'=':  true,
		'>':  true,
		'?':  true,
		'@':  true,
		'A':  true,
		'B':  true,
		'C':  true,
		'D':  true,
		'E':  true,
		'F':  true,
		'G':  true,
		'H':  true,
		'I':  true,
		'J':  true,
		'K':  true,
		'L':  true,
		'M':  true,
		'N':  true,
		'O':  true,
		'P':  true,
		'Q':  true,
		'R':  true,
		'S':  true,
		'T':  true,
		'U':  true,
		'V':  true,
		'W':  true,
		'X':  true,
		'Y':  true,
		'Z':  true,
		'[':  true,
		'\\': false,
		']':  true,
		'^':  true,
		'_':  true,
		'`':  true,
		'a':  true,
		'b':  true,
		'c':  true,
		'd':  true,
		'e':  true,
		'f':  true,
		'g':  true,
		'h':  true,
		'i':  true,
		'j':  true,
		'k':  true,
		'l':  true,
		'm':  true,
		'n':  true,
		'o':  true,
		'p':  true,
		'q':  true,
		'r':  true,
		's':  true,
		't':  true,
		'u':  true,
		'v':  true,
		'w':  true,
		'x':  true,
		'y':  true,
		'z':  true,
		'{':  true,
		'|':  true,
		'}':  true,
		'~':  true,
		'\u007f': true,
	}
)

func New(level slog.Level, out io.Writer) slog.Handler {
	return &handler{
		minLevel: level,
		state:    new(state),
		mu:       new(sync.Mutex),
		w:        out,
	}
}

type handler struct {
	minLevel slog.Level
	state    *state
	mu       *sync.Mutex
	w        io.Writer
}

func (h *handler) clone() *handler {
	b0 := pool.Get().(*[]byte)
	return &handler{
		minLevel: h.minLevel,
		state:    h.state.clone(*b0),
		mu:       h.mu,
		w:        h.w,
	}
}

func (h *handler) Enabled(ctx context.Context, l slog.Level) bool {
	return l >= h.minLevel
}

func (h *handler) WithAttrs(attrs []slog.Attr) slog.Handler {
	if len(attrs) == 0 {
		return h
	}
	h2 := h.clone()
	for _, a := range attrs {
		h2.state.attr(a)
	}
	return h2
}

func (h *handler) WithGroup(name string) slog.Handler {
	if name == "" {
		return h
	}
	h2 := h.clone()
	h2.state.openGroup(name)
	return h2
}

func (h *handler) Handle(ctx context.Context, r slog.Record) error {
	// add attrs to state
	b0 := pool.Get().(*[]byte)
	defer func() { pool.Put(b0) }()
	state := h.state.clone(*b0)
	r.Attrs(func(a slog.Attr) bool {
		state.attr(a)
		return true
	})
	state.closeAll()

	// initialize write buffer
	var buf []byte
	if cap(state.buf)-len(state.buf) < 160+len(r.Message) {
		buf = make([]byte, 0, len(state.buf)+160+len(r.Message))
	} else {
		b1 := pool.Get().(*[]byte)
		defer func() { pool.Put(b1) }()
		buf = (*b1)[:0]
	}

	buf = append(buf, `{`...)

	// time
	if !r.Time.IsZero() {
		buf = append(buf, `"time":"`...)
		buf = r.Time.AppendFormat(buf, time.RFC3339Nano)
		buf = append(buf, `",`...)
	}
	// level
	buf = append(buf, `"level":"`...)
	buf = append(buf, r.Level.String()...)
	buf = append(buf, `"`...)

	// trace
	spanCtx := trace.SpanContextFromContext(ctx)
	if spanCtx.IsValid() {
		buf = append(buf, `,"trace_id":"`...)
		buf = append(buf, spanCtx.TraceID().String()...)
		buf = append(buf, `","span_id":"`...)
		buf = append(buf, spanCtx.SpanID().String()...)
		buf = append(buf, `"`...)
	}
	// any other special keys
	// e.g. file:line, attrs from ctx or extracted during attr processing by state.attr

	// message
	buf = append(buf, `,"message":`...)
	buf = appendString(buf, r.Message)

	// attrs
	if len(state.buf) > 0 {
		buf = append(buf, `,`...)
		buf = append(buf, state.buf...)
	}
	buf = append(buf, "}\n"...)

	h.mu.Lock()
	defer h.mu.Unlock()
	_, err := h.w.Write(buf)
	return err
}

// state holds preformatted attributes
type state struct {
	confirmedLast int    // length of buf when we last wrote a complete attr
	groupOpenIdx  []int  // indexes before open groups, allows rollback on empty groups
	separator     []byte // separator to write before an attr or group
	buf           []byte // buffer of preformatted contents
	// TODO hold special keys to be placed in top level (eg error)
}

func (h *state) clone(buf []byte) *state {
	if cap(h.buf) > stateBufferSize {
		buf = slices.Clone(h.buf)
	} else {
		buf = buf[:len(h.buf)]
		copy(buf, h.buf)
	}
	s := &state{
		h.confirmedLast,
		slices.Clone(h.groupOpenIdx),
		slices.Clone(h.separator),
		buf,
	}
	return s
}

func (h *state) openGroup(n string) {
	h.groupOpenIdx = append(h.groupOpenIdx, len(h.buf)) // record rollback point
	h.buf = append(h.buf, h.separator...)               // maybe need a separator
	h.buf = appendString(h.buf, n)                      // key name
	h.buf = append(h.buf, []byte(":{")...)              // open group
	h.separator = nil                                   // no separator for first attr
}

func (h *state) closeGroup() {
	lastGroupIdx := h.groupOpenIdx[len(h.groupOpenIdx)-1] // pop off the rollback point for current group
	h.groupOpenIdx = h.groupOpenIdx[:len(h.groupOpenIdx)-1]
	if h.confirmedLast > lastGroupIdx { // group was non empty
		h.buf = append(h.buf, []byte("}")...) // close off the group
		h.confirmedLast = len(h.buf)          // record new last point
		return
	}
	h.buf = h.buf[:lastGroupIdx] // all open subgroups were empty, rollback
	// restore the separator that was in effect before the group was opened:
	// a comma if the enclosing scope already holds a confirmed attr, nothing otherwise
	parentIdx := 0
	if len(h.groupOpenIdx) > 0 {
		parentIdx = h.groupOpenIdx[len(h.groupOpenIdx)-1]
	}
	if h.confirmedLast > parentIdx {
		h.separator = globalSep
	} else {
		h.separator = nil
	}
}

func (h *state) closeAll() {
	for range h.groupOpenIdx {
		h.closeGroup()
	}
	h.groupOpenIdx = nil
}

func (h *state) attr(attr slog.Attr) {
	val := attr.Value.Resolve() // handle logvaluer
	if attr.Equal(slog.Attr{}) { // drop empty attr
		return
	} else if val.Kind() == slog.KindGroup { // recurse into group
		g := val.Group()
		if len(g) == 0 {
			return
		} else if attr.Key != "" { // groups with empty keys are inlined
			h.openGroup(attr.Key)
		}
		for _, a := range g {
			h.attr(a)
		}
		if attr.Key != "" {
			h.closeGroup()
		}
		return
	} else if attr.Key == "" {
		return
	}
	// TODO: grab any special keys

	h.buf = append(h.buf, h.separator...)
	h.separator = globalSep
	h.buf = appendString(h.buf, attr.Key)
	h.buf = append(h.buf, []byte(":")...)
	switch val.Kind() {
	case slog.KindAny:
		b, err := json.Marshal(val.Any())
		if err != nil {
			// fall back to a quoted error rather than emitting invalid JSON
			b = appendString(nil, "!ERROR:"+err.Error())
		}
		h.buf = append(h.buf, b...)
	case slog.KindBool:
		h.buf = strconv.AppendBool(h.buf, val.Bool())
	case slog.KindDuration:
		h.buf = append(h.buf, `"`...)
		h.buf = append(h.buf, val.Duration().String()...)
		h.buf = append(h.buf, `"`...)
	case slog.KindFloat64:
		h.buf = strconv.AppendFloat(h.buf, val.Float64(), 'f', -1, 64)
	case slog.KindInt64:
		h.buf = strconv.AppendInt(h.buf, val.Int64(), 10)
	case slog.KindString:
		h.buf = appendString(h.buf, val.String())
	case slog.KindTime:
		h.buf = append(h.buf, `"`...)
		h.buf = val.Time().AppendFormat(h.buf, time.RFC3339Nano)
		h.buf = append(h.buf, `"`...)
	case slog.KindUint64:
		h.buf = strconv.AppendUint(h.buf, val.Uint64(), 10)
	default:
		panic("unhandled kind " + val.Kind().String())
	}
	h.confirmedLast = len(h.buf)
}

// json string encoder copied from encoding/json

func appendString[Bytes []byte | string](dst []byte, src Bytes) []byte {
	dst = append(dst, '"')
	start := 0
	for i := 0; i < len(src); {
		if b := src[i]; b < utf8.RuneSelf {
			if safeSet[b] {
				i++
				continue
			}
			dst = append(dst, src[start:i]...)
			switch b {
			case '\\', '"':
				dst = append(dst, '\\', b)
			case '\b':
				dst = append(dst, '\\', 'b')
			case '\f':
				dst = append(dst, '\\', 'f')
			case '\n':
				dst = append(dst, '\\', 'n')
			case '\r':
				dst = append(dst, '\\', 'r')
			case '\t':
				dst = append(dst, '\\', 't')
			default:
				// This encodes bytes < 0x20 except for \b, \f, \n, \r and \t.
				dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		// TODO(https://go.dev/issue/56948): Use generic utf8 functionality.
		// For now, cast only a small portion of byte slices to a string
		// so that it can be stack allocated. This slows down []byte slightly
		// due to the extra copy, but keeps string performance roughly the same.
		n := len(src) - i
		if n > utf8.UTFMax {
			n = utf8.UTFMax
		}
		c, size := utf8.DecodeRuneInString(string(src[i : i+n]))
		if c == utf8.RuneError && size == 1 {
			dst = append(dst, src[start:i]...)
			dst = append(dst, `\ufffd`...)
			i += size
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See https://en.wikipedia.org/wiki/JSON#Safety.
		if c == '\u2028' || c == '\u2029' {
			dst = append(dst, src[start:i]...)
			dst = append(dst, '\\', 'u', '2', '0', '2', hex[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	dst = append(dst, src[start:]...)
	dst = append(dst, '"')
	return dst
}
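
And a usage sketch, assuming the package is imported from a hypothetical module path (example.com/jsonlog stands in for wherever you actually keep it):

package main

import (
	"log/slog"
	"os"

	"example.com/jsonlog" // hypothetical import path for the package above
)

func main() {
	logger := slog.New(jsonlog.New(slog.LevelInfo, os.Stderr))
	logger.Info("request handled",
		slog.Group("request",
			slog.String("method", "GET"),
			slog.Int("status", 200),
		),
	)
}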