增加日志配置选项支持,优化日志初始化与数据库日志记录

This commit is contained in:
2026-01-27 10:32:43 +08:00
parent 729d335a69
commit e6e6d3b222
11 changed files with 220 additions and 11 deletions

View File

@@ -21,7 +21,16 @@ BingPaper 支持通过配置文件YAML和环境变量进行配置。
- `base_url`: 服务的基础 URL,用于生成某些绝对路径,默认为空。
#### log (日志配置)
- `level`: 日志级别,可选 `debug`, `info`, `warn`, `error`,默认 `info`
- `level`: 业务日志级别,可选 `debug`, `info`, `warn`, `error`,默认 `info`
- `filename`: 业务日志输出文件路径,默认 `data/logs/app.log`
- `db_filename`: 数据库日志输出文件路径,默认 `data/logs/db.log`
- `max_size`: 日志文件切割大小 (MB),默认 `100`
- `max_backups`: 保留旧日志文件个数,默认 `3`
- `max_age`: 保留旧日志文件天数,默认 `7`
- `compress`: 是否压缩旧日志文件,默认 `true`
- `log_console`: 是否同时输出到控制台,默认 `true`
- `show_db_log`: 是否在控制台输出数据库日志SQL默认 `false`
- `db_log_level`: 数据库日志级别,可选 `debug`, `info`, `warn`, `error`, `silent`。`debug`/`info` 会记录所有 SQL。默认 `info`
#### api (API 模式)
- `mode`: API 行为模式。

View File

@@ -4,6 +4,15 @@ server:
log:
level: info
filename: data/logs/app.log
db_filename: data/logs/db.log
max_size: 100
max_backups: 3
max_age: 7
compress: true
log_console: true
show_db_log: false
db_log_level: info
api:
mode: local # local | redirect

1
go.mod
View File

@@ -105,6 +105,7 @@ require (
golang.org/x/text v0.33.0 // indirect
golang.org/x/tools v0.40.0 // indirect
google.golang.org/protobuf v1.36.9 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/libc v1.22.5 // indirect

2
go.sum
View File

@@ -267,6 +267,8 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=

View File

@@ -34,7 +34,7 @@ func Init(webFS embed.FS, configPath string) *gin.Engine {
cfg := config.GetConfig()
// 2. 初始化日志
util.InitLogger(cfg.Log.Level)
util.InitLogger(cfg.Log)
// 输出配置信息
util.Logger.Info("Application configuration loaded")

View File

@@ -31,9 +31,29 @@ type ServerConfig struct {
}
type LogConfig struct {
Level string `mapstructure:"level"`
Level string `mapstructure:"level"`
Filename string `mapstructure:"filename"` // 业务日志文件名
DBFilename string `mapstructure:"db_filename"` // 数据库日志文件名
MaxSize int `mapstructure:"max_size"` // 每个日志文件最大大小 (MB)
MaxBackups int `mapstructure:"max_backups"` // 保留旧日志文件最大个数
MaxAge int `mapstructure:"max_age"` // 保留旧日志文件最大天数
Compress bool `mapstructure:"compress"` // 是否压缩旧日志文件
LogConsole bool `mapstructure:"log_console"` // 是否同时输出到控制台
ShowDBLog bool `mapstructure:"show_db_log"` // 是否在控制台显示数据库日志
DBLogLevel string `mapstructure:"db_log_level"` // 数据库日志级别: debug, info, warn, error
}
// The accessors below satisfy the util.LogConfig interface so the util
// package can read log settings without importing the config package
// (avoiding an import cycle between config and util). The Get prefix is
// required by that interface and intentionally kept despite Go naming
// convention.
func (c LogConfig) GetLevel() string { return c.Level }
func (c LogConfig) GetFilename() string { return c.Filename }
func (c LogConfig) GetDBFilename() string { return c.DBFilename }
func (c LogConfig) GetMaxSize() int { return c.MaxSize }
func (c LogConfig) GetMaxBackups() int { return c.MaxBackups }
func (c LogConfig) GetMaxAge() int { return c.MaxAge }
func (c LogConfig) GetCompress() bool { return c.Compress }
func (c LogConfig) GetLogConsole() bool { return c.LogConsole }
func (c LogConfig) GetShowDBLog() bool { return c.ShowDBLog }
func (c LogConfig) GetDBLogLevel() string { return c.DBLogLevel }
// APIConfig controls how image API requests are served.
type APIConfig struct {
	Mode string `mapstructure:"mode"` // local | redirect
}
@@ -122,6 +142,15 @@ func Init(configPath string) error {
v.SetDefault("server.port", 8080)
v.SetDefault("log.level", "info")
v.SetDefault("log.filename", "data/logs/app.log")
v.SetDefault("log.db_filename", "data/logs/db.log")
v.SetDefault("log.max_size", 100)
v.SetDefault("log.max_backups", 3)
v.SetDefault("log.max_age", 7)
v.SetDefault("log.compress", true)
v.SetDefault("log.log_console", true)
v.SetDefault("log.show_db_log", false)
v.SetDefault("log.db_log_level", "info")
v.SetDefault("api.mode", "local")
v.SetDefault("cron.enabled", true)
v.SetDefault("cron.daily_spec", "0 10 * * *")

View File

@@ -10,8 +10,10 @@ import (
"BingPaper/internal/model"
"BingPaper/internal/service/image"
"BingPaper/internal/storage"
"BingPaper/internal/util"
"github.com/gin-gonic/gin"
"go.uber.org/zap"
)
// GetToday 获取今日图片
@@ -187,6 +189,7 @@ func handleImageResponse(c *gin.Context, img *model.Image) {
func serveLocal(c *gin.Context, key string) {
reader, contentType, err := storage.GlobalStorage.Get(context.Background(), key)
if err != nil {
util.Logger.Error("Failed to get image from storage", zap.String("key", key), zap.Error(err))
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get image"})
return
}

View File

@@ -4,7 +4,9 @@ import (
"BingPaper/internal/config"
"BingPaper/internal/model"
"BingPaper/internal/util"
"context"
"fmt"
"time"
"github.com/glebarez/sqlite"
"go.uber.org/zap"
@@ -16,6 +18,64 @@ import (
var DB *gorm.DB
// gormLogger adapts a zap.Logger to GORM's logger.Interface so database
// activity is emitted through the application's structured logging
// (util.DBLogger) instead of GORM's default logger.
type gormLogger struct {
	ZapLogger *zap.Logger     // destination zap logger for SQL output
	LogLevel  logger.LogLevel // GORM-level threshold gating each log call
}
// LogMode returns a copy of the logger with the given GORM log level,
// leaving the receiver untouched, as required by logger.Interface.
func (l *gormLogger) LogMode(level logger.LogLevel) logger.Interface {
	clone := *l
	clone.LogLevel = level
	return &clone
}
// Info forwards GORM informational messages to zap when the configured
// level permits Info output.
func (l *gormLogger) Info(ctx context.Context, msg string, data ...interface{}) {
	if l.LogLevel < logger.Info {
		return
	}
	l.ZapLogger.Sugar().Infof(msg, data...)
}
// Warn forwards GORM warning messages to zap when the configured level
// permits Warn output.
func (l *gormLogger) Warn(ctx context.Context, msg string, data ...interface{}) {
	if l.LogLevel < logger.Warn {
		return
	}
	l.ZapLogger.Sugar().Warnf(msg, data...)
}
// Error forwards GORM error messages to zap when the configured level
// permits Error output.
func (l *gormLogger) Error(ctx context.Context, msg string, data ...interface{}) {
	if l.LogLevel < logger.Error {
		return
	}
	l.ZapLogger.Sugar().Errorf(msg, data...)
}
// Trace logs a completed SQL statement together with its latency and
// affected-row count. Failed statements are logged at Error level,
// statements slower than 200ms at Warn level ("SLOW SQL"), and all
// remaining statements at Info level, each gated by LogLevel.
func (l *gormLogger) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) {
	// Bail out before invoking fc() (which formats the SQL string) when
	// logging is disabled. GORM defines logger.Silent as 1, so the guard
	// must compare against logger.Silent — the previous `l.LogLevel <= 0`
	// never matched, and fc() was executed needlessly at the silent level.
	if l.LogLevel <= logger.Silent {
		return
	}

	elapsed := time.Since(begin)
	sql, rows := fc()

	switch {
	case err != nil && l.LogLevel >= logger.Error:
		// NOTE(review): gorm.ErrRecordNotFound also lands here and is
		// logged as an error — confirm that is intended.
		l.ZapLogger.Error("SQL ERROR",
			zap.Error(err),
			zap.Duration("elapsed", elapsed),
			zap.Int64("rows", rows),
			zap.String("sql", sql),
		)
	case elapsed > 200*time.Millisecond && l.LogLevel >= logger.Warn:
		l.ZapLogger.Warn("SLOW SQL",
			zap.Duration("elapsed", elapsed),
			zap.Int64("rows", rows),
			zap.String("sql", sql),
		)
	case l.LogLevel >= logger.Info:
		l.ZapLogger.Info("SQL",
			zap.Duration("elapsed", elapsed),
			zap.Int64("rows", rows),
			zap.String("sql", sql),
		)
	}
}
func InitDB() error {
cfg := config.GetConfig()
var dialector gorm.Dialector
@@ -31,8 +91,25 @@ func InitDB() error {
return fmt.Errorf("unsupported db type: %s", cfg.DB.Type)
}
gormLogLevel := logger.Info
switch cfg.Log.DBLogLevel {
case "debug":
gormLogLevel = logger.Info // GORM 的 Info 级会输出所有 SQL
case "info":
gormLogLevel = logger.Info
case "warn":
gormLogLevel = logger.Warn
case "error":
gormLogLevel = logger.Error
case "silent":
gormLogLevel = logger.Silent
}
gormConfig := &gorm.Config{
Logger: logger.Default.LogMode(logger.Info),
Logger: &gormLogger{
ZapLogger: util.DBLogger,
LogLevel: gormLogLevel,
},
DisableForeignKeyConstraintWhenMigrating: true,
}

View File

@@ -55,17 +55,22 @@ func NewFetcher() *Fetcher {
func (f *Fetcher) Fetch(ctx context.Context, n int) error {
util.Logger.Info("Starting fetch task", zap.Int("n", n))
url := fmt.Sprintf("%s?format=js&idx=0&n=%d&uhd=1&mkt=%s", config.BingAPIBase, n, config.BingMkt)
util.Logger.Debug("Requesting Bing API", zap.String("url", url))
resp, err := f.httpClient.Get(url)
if err != nil {
util.Logger.Error("Failed to request Bing API", zap.Error(err))
return err
}
defer resp.Body.Close()
var bingResp BingResponse
if err := json.NewDecoder(resp.Body).Decode(&bingResp); err != nil {
util.Logger.Error("Failed to decode Bing API response", zap.Error(err))
return err
}
util.Logger.Info("Fetched images from Bing", zap.Int("count", len(bingResp.Images)))
for _, bingImg := range bingResp.Images {
if err := f.processImage(ctx, bingImg); err != nil {
util.Logger.Error("Failed to process image", zap.String("date", bingImg.Enddate), zap.Error(err))
@@ -93,12 +98,14 @@ func (f *Fetcher) processImage(ctx context.Context, bingImg BingImage) error {
imgData, err := f.downloadImage(imgURL)
if err != nil {
util.Logger.Error("Failed to download image", zap.String("url", imgURL), zap.Error(err))
return err
}
// 解码图片用于缩放
srcImg, _, err := image.Decode(bytes.NewReader(imgData))
if err != nil {
util.Logger.Error("Failed to decode image data", zap.Error(err))
return err
}
@@ -115,6 +122,7 @@ func (f *Fetcher) processImage(ctx context.Context, bingImg BingImage) error {
Columns: []clause.Column{{Name: "date"}},
DoNothing: true,
}).Create(&dbImg).Error; err != nil {
util.Logger.Error("Failed to create image record", zap.Error(err))
return err
}
@@ -122,6 +130,7 @@ func (f *Fetcher) processImage(ctx context.Context, bingImg BingImage) error {
if dbImg.ID == 0 {
var existing model.Image
if err := repo.DB.Where("date = ?", dateStr).First(&existing).Error; err != nil {
util.Logger.Error("Failed to query existing image record after conflict", zap.Error(err))
return err
}
dbImg = existing

View File

@@ -25,6 +25,7 @@ func CleanupOldImages(ctx context.Context) error {
var images []model.Image
if err := repo.DB.Where("date < ?", threshold).Preload("Variants").Find(&images).Error; err != nil {
util.Logger.Error("Failed to query old images for cleanup", zap.Error(err))
return err
}

View File

@@ -2,14 +2,61 @@ package util
import (
"os"
"path/filepath"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
)
var Logger *zap.Logger
var DBLogger *zap.Logger
func InitLogger(level string) {
// LogConfig abstracts the logging configuration consumed by this package.
// It is declared here, rather than importing the config package, to avoid
// a circular dependency between util and config.
type LogConfig interface {
	GetLevel() string      // business log level: debug, info, warn, error
	GetFilename() string   // business log file path; "" disables file output
	GetDBFilename() string // database log file path; "" disables file output
	GetMaxSize() int       // max size per log file before rotation (MB)
	GetMaxBackups() int    // max number of rotated files to keep
	GetMaxAge() int        // max age of rotated files (days)
	GetCompress() bool     // whether rotated files are gzip-compressed
	GetLogConsole() bool   // mirror business logs to stdout
	GetShowDBLog() bool    // mirror database (SQL) logs to stdout
	GetDBLogLevel() string // database log level: debug, info, warn, error, silent
}
// InitLogger builds the package-level business logger (Logger) and the
// database logger (DBLogger) from the supplied configuration. Both share
// the same rotation settings but differ in level, output file, and
// console mirroring.
func InitLogger(cfg LogConfig) {
	appFile := cfg.GetFilename()
	dbFile := cfg.GetDBFilename()

	// Make sure the target directories exist before lumberjack opens the
	// files; creation errors are deliberately ignored (best effort).
	for _, f := range []string{appFile, dbFile} {
		if f != "" {
			_ = os.MkdirAll(filepath.Dir(f), 0755)
		}
	}

	size := cfg.GetMaxSize()
	backups := cfg.GetMaxBackups()
	age := cfg.GetMaxAge()
	compress := cfg.GetCompress()

	Logger = createZapLogger(cfg.GetLevel(), appFile, size, backups, age, compress, cfg.GetLogConsole())
	// SQL logs reach the console only when show_db_log is enabled.
	DBLogger = createZapLogger(cfg.GetDBLogLevel(), dbFile, size, backups, age, compress, cfg.GetShowDBLog())
}
func createZapLogger(level, filename string, maxSize, maxBackups, maxAge int, compress, logConsole bool) *zap.Logger {
var zapLevel zapcore.Level
switch level {
case "debug":
@@ -28,11 +75,33 @@ func InitLogger(level string) {
encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
core := zapcore.NewCore(
zapcore.NewConsoleEncoder(encoderConfig),
zapcore.AddSync(os.Stdout),
zapLevel,
)
var cores []zapcore.Core
Logger = zap.New(core, zap.AddCaller())
// 文件输出
if filename != "" {
w := zapcore.AddSync(&lumberjack.Logger{
Filename: filename,
MaxSize: maxSize,
MaxBackups: maxBackups,
MaxAge: maxAge,
Compress: compress,
})
cores = append(cores, zapcore.NewCore(
zapcore.NewConsoleEncoder(encoderConfig),
w,
zapLevel,
))
}
// 控制台输出
if logConsole {
cores = append(cores, zapcore.NewCore(
zapcore.NewConsoleEncoder(encoderConfig),
zapcore.AddSync(os.Stdout),
zapLevel,
))
}
core := zapcore.NewTee(cores...)
return zap.New(core, zap.AddCaller())
}