Documentation
¶
Overview ¶
Copied from: https://github.com/ThreeDotsLabs/watermill/blob/master/slog.go
Index ¶
- Constants
- Variables
- func NewSlogLogger(logger *slog.Logger) api.LoggerAdapter
- func NewSlogLoggerWithLevelMapping(logger *slog.Logger, shardLevelToSlog map[slog.Level]slog.Level) api.LoggerAdapter
- func Run(cfg *Config) error
- type Config
- type Option
- func WithCombiner(combiner api.Combiner) Option
- func WithFilesystem(storer api.Filesystem) Option
- func WithInputPath(path string) Option
- func WithLogger(logger api.LoggerAdapter) Option
- func WithMapSplitSize(size int64) Option
- func WithMapper(mapper api.Mapper) Option
- func WithMasterAddress(addr string) Option
- func WithMaxConcurrency(limit int) Option
- func WithNumReducers(partitions int) Option
- func WithOutputDir(path string) Option
- func WithPartitioner(partitioner api.Partitioner) Option
- func WithReducer(reducer api.Reducer) Option
- type SlogLoggerAdapter
- func (s *SlogLoggerAdapter) Debug(msg string, fields api.LogFields)
- func (s *SlogLoggerAdapter) Error(msg string, err error, fields api.LogFields)
- func (s *SlogLoggerAdapter) Info(msg string, fields api.LogFields)
- func (s *SlogLoggerAdapter) Trace(msg string, fields api.LogFields)
- func (s *SlogLoggerAdapter) With(fields api.LogFields) api.LoggerAdapter
Constants ¶
const ( DefaultMasterAddress = "localhost:6969" DefaultOutputDir = "./shard" DefaultNumReducers = 16 DefaultChunkSize = 64 * 1024 * 1024 // 64MB )
const LevelTrace = slog.LevelDebug - 4
LevelTrace must be defined here because the slog package does not provide one by default. It is derived by subtracting 4 from slog.LevelDebug, following the spacing convention of slog.LevelWarn and slog.LevelError, which are set to 4 and 8 respectively.
Variables ¶
var DefaultMaxConcurrency = runtime.NumCPU() * 2
DefaultMaxConcurrency is the maximum number of tasks that a worker can execute concurrently.
Functions ¶
func NewSlogLogger ¶
func NewSlogLogger(logger *slog.Logger) api.LoggerAdapter
NewSlogLogger creates an adapter to the standard library's structured logging package. If `logger` is nil, the result of slog.Default is used instead.
func NewSlogLoggerWithLevelMapping ¶
func NewSlogLoggerWithLevelMapping( logger *slog.Logger, shardLevelToSlog map[slog.Level]slog.Level, ) api.LoggerAdapter
NewSlogLoggerWithLevelMapping creates an adapter to the standard library's structured logging package. If `logger` is nil, the result of slog.Default is used instead.
The `shardLevelToSlog` parameter maps Shard's log levels to the levels of the structured logger. It is helpful when, for example, you want Shard's info logs to be emitted as debug in slog.
Types ¶
type Config ¶
type Config struct {
// MasterAddress is the RPC address (host:port) of the coordinator service
// where worker will register.
//
// If not specified, it defaults to DefaultMasterAddress.
MasterAddress string
// InputPath specifies the file or directory pattern (glob) to be processed
// by the Map phase.
InputPath string
// OutputDir is the directory where intermediate files and final Reduce
// outputs will be stored.
//
// If not specified, it defaults to DefaultOutputDir.
OutputDir string
// NumReducers is the number of reduce tasks (R). This determines the
// number of output partitions.
//
	// If not specified, it defaults to DefaultNumReducers.
NumReducers int
// ChunkSize is the maximum size in bytes for a single input split given
// to a Mapper.
//
// If not specified, it defaults to DefaultChunkSize.
ChunkSize int64
// MaxConcurrency limits the number of concurrent tasks that can be
// processed by a single worker.
//
// If not specified, it defaults to DefaultMaxConcurrency.
MaxConcurrency int
// Mapper is the client provided implementation of the Map function.
Mapper api.Mapper
// Reducer is the client provided implementation of the Reduce function.
Reducer api.Reducer
// Combiner is the client provided implementation of the Combine function.
Combiner api.Combiner
// Partitioner determines which reducer handles a specific key. If nil,
// a default hash-based partitioner is typically applied.
Partitioner api.Partitioner
// Filesystem handles the abstraction of reading and writing files (e.g.,
// wrapping local disk IO or cloud storage calls).
Filesystem api.Filesystem
// Logger is an interface that the logger (e.g., slog, zlog) should satisfy.
Logger api.LoggerAdapter
// contains filtered or unexported fields
}
Config holds the runtime configuration for the Shard library. It defines infrastructure settings and the core processing logic.
type Option ¶
type Option func(*Config)
func WithCombiner ¶
func WithFilesystem ¶
func WithFilesystem(storer api.Filesystem) Option
func WithInputPath ¶
func WithLogger ¶
func WithLogger(logger api.LoggerAdapter) Option
func WithMapSplitSize ¶
func WithMapper ¶
func WithMasterAddress ¶
func WithMaxConcurrency ¶
func WithNumReducers ¶
func WithOutputDir ¶
func WithPartitioner ¶
func WithPartitioner(partitioner api.Partitioner) Option
func WithReducer ¶
type SlogLoggerAdapter ¶
type SlogLoggerAdapter struct {
// contains filtered or unexported fields
}
SlogLoggerAdapter wraps slog.Logger.
func (*SlogLoggerAdapter) Debug ¶
func (s *SlogLoggerAdapter) Debug(msg string, fields api.LogFields)
Debug logs a message to slog.LevelDebug.
func (*SlogLoggerAdapter) Error ¶
func (s *SlogLoggerAdapter) Error(msg string, err error, fields api.LogFields)
Error logs a message to slog.LevelError.
func (*SlogLoggerAdapter) Info ¶
func (s *SlogLoggerAdapter) Info(msg string, fields api.LogFields)
Info logs a message to slog.LevelInfo.
func (*SlogLoggerAdapter) Trace ¶
func (s *SlogLoggerAdapter) Trace(msg string, fields api.LogFields)
Trace logs a message to LevelTrace.
func (*SlogLoggerAdapter) With ¶
func (s *SlogLoggerAdapter) With(fields api.LogFields) api.LoggerAdapter
With returns a SlogLoggerAdapter with a set of fields injected into all subsequent logging messages.