Mirror of https://git.fightbot.fun/hxuanyu/BingPaper.git (synced 2026-02-15 07:19:33 +08:00)
Commit: Add support for log configuration options; improve logger initialization and database logging
@@ -34,7 +34,7 @@ func Init(webFS embed.FS, configPath string) *gin.Engine {
    cfg := config.GetConfig()

    // 2. Initialize the logger
-   util.InitLogger(cfg.Log.Level)
    util.InitLogger(cfg.Log)

    // Log the loaded configuration
    util.Logger.Info("Application configuration loaded")
@@ -31,9 +31,29 @@ type ServerConfig struct {
}

type LogConfig struct {
    Level      string `mapstructure:"level"`
    Filename   string `mapstructure:"filename"`     // business log file name
    DBFilename string `mapstructure:"db_filename"`  // database log file name
    MaxSize    int    `mapstructure:"max_size"`     // maximum size of each log file (MB)
    MaxBackups int    `mapstructure:"max_backups"`  // maximum number of old log files to keep
    MaxAge     int    `mapstructure:"max_age"`      // maximum number of days to keep old log files
    Compress   bool   `mapstructure:"compress"`     // whether to compress old log files
    LogConsole bool   `mapstructure:"log_console"`  // whether to also write logs to the console
    ShowDBLog  bool   `mapstructure:"show_db_log"`  // whether to show database logs on the console
    DBLogLevel string `mapstructure:"db_log_level"` // database log level: debug, info, warn, error
}

func (c LogConfig) GetLevel() string      { return c.Level }
func (c LogConfig) GetFilename() string   { return c.Filename }
func (c LogConfig) GetDBFilename() string { return c.DBFilename }
func (c LogConfig) GetMaxSize() int       { return c.MaxSize }
func (c LogConfig) GetMaxBackups() int    { return c.MaxBackups }
func (c LogConfig) GetMaxAge() int        { return c.MaxAge }
func (c LogConfig) GetCompress() bool     { return c.Compress }
func (c LogConfig) GetLogConsole() bool   { return c.LogConsole }
func (c LogConfig) GetShowDBLog() bool    { return c.ShowDBLog }
func (c LogConfig) GetDBLogLevel() string { return c.DBLogLevel }

type APIConfig struct {
    Mode string `mapstructure:"mode"` // local | redirect
}
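Note: the getter methods above let the util package consume the configuration through a small interface (the util.LogConfig interface added further down in this diff) instead of importing the config package directly. A minimal sketch of a compile-time check one could add to keep the two in sync; the test file placement and package name are assumptions, not part of this commit:

package config_test

import (
    "BingPaper/internal/config"
    "BingPaper/internal/util"
)

// Fails to compile if config.LogConfig stops satisfying util.LogConfig.
var _ util.LogConfig = config.LogConfig{}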
@@ -122,6 +142,15 @@ func Init(configPath string) error {

    v.SetDefault("server.port", 8080)
    v.SetDefault("log.level", "info")
    v.SetDefault("log.filename", "data/logs/app.log")
    v.SetDefault("log.db_filename", "data/logs/db.log")
    v.SetDefault("log.max_size", 100)
    v.SetDefault("log.max_backups", 3)
    v.SetDefault("log.max_age", 7)
    v.SetDefault("log.compress", true)
    v.SetDefault("log.log_console", true)
    v.SetDefault("log.show_db_log", false)
    v.SetDefault("log.db_log_level", "info")
    v.SetDefault("api.mode", "local")
    v.SetDefault("cron.enabled", true)
    v.SetDefault("cron.daily_spec", "0 10 * * *")
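Note: the new defaults correspond to a config file section roughly like the one embedded in the sketch below. This assumes the project loads configuration with spf13/viper (suggested by the v.SetDefault calls and the mapstructure tags); the exact file layout is an assumption, not shown in this commit.

package main

import (
    "fmt"
    "strings"

    "github.com/spf13/viper"
)

func main() {
    // Hypothetical "log" section of a YAML config file using the new keys.
    yamlCfg := `
log:
  level: debug
  filename: data/logs/app.log
  db_filename: data/logs/db.log
  max_size: 50
  max_backups: 5
  max_age: 14
  compress: true
  log_console: true
  show_db_log: false
  db_log_level: warn
`
    v := viper.New()
    v.SetConfigType("yaml")
    v.SetDefault("log.db_log_level", "info") // file values take precedence over defaults
    if err := v.ReadConfig(strings.NewReader(yamlCfg)); err != nil {
        panic(err)
    }
    fmt.Println(v.GetString("log.db_log_level")) // prints "warn"
}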
@@ -10,8 +10,10 @@ import (
    "BingPaper/internal/model"
    "BingPaper/internal/service/image"
    "BingPaper/internal/storage"
    "BingPaper/internal/util"

    "github.com/gin-gonic/gin"
    "go.uber.org/zap"
)

// GetToday returns today's image
@@ -187,6 +189,7 @@ func handleImageResponse(c *gin.Context, img *model.Image) {
func serveLocal(c *gin.Context, key string) {
    reader, contentType, err := storage.GlobalStorage.Get(context.Background(), key)
    if err != nil {
        util.Logger.Error("Failed to get image from storage", zap.String("key", key), zap.Error(err))
        c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get image"})
        return
    }
@@ -4,7 +4,9 @@ import (
    "BingPaper/internal/config"
    "BingPaper/internal/model"
    "BingPaper/internal/util"
    "context"
    "fmt"
    "time"

    "github.com/glebarez/sqlite"
    "go.uber.org/zap"
@@ -16,6 +18,64 @@ import (

var DB *gorm.DB

type gormLogger struct {
    ZapLogger *zap.Logger
    LogLevel  logger.LogLevel
}

func (l *gormLogger) LogMode(level logger.LogLevel) logger.Interface {
    return &gormLogger{
        ZapLogger: l.ZapLogger,
        LogLevel:  level,
    }
}

func (l *gormLogger) Info(ctx context.Context, msg string, data ...interface{}) {
    if l.LogLevel >= logger.Info {
        l.ZapLogger.Sugar().Infof(msg, data...)
    }
}

func (l *gormLogger) Warn(ctx context.Context, msg string, data ...interface{}) {
    if l.LogLevel >= logger.Warn {
        l.ZapLogger.Sugar().Warnf(msg, data...)
    }
}

func (l *gormLogger) Error(ctx context.Context, msg string, data ...interface{}) {
    if l.LogLevel >= logger.Error {
        l.ZapLogger.Sugar().Errorf(msg, data...)
    }
}

func (l *gormLogger) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) {
    if l.LogLevel <= 0 {
        return
    }
    elapsed := time.Since(begin)
    sql, rows := fc()
    if err != nil && l.LogLevel >= logger.Error {
        l.ZapLogger.Error("SQL ERROR",
            zap.Error(err),
            zap.Duration("elapsed", elapsed),
            zap.Int64("rows", rows),
            zap.String("sql", sql),
        )
    } else if elapsed > 200*time.Millisecond && l.LogLevel >= logger.Warn {
        l.ZapLogger.Warn("SLOW SQL",
            zap.Duration("elapsed", elapsed),
            zap.Int64("rows", rows),
            zap.String("sql", sql),
        )
    } else if l.LogLevel >= logger.Info {
        l.ZapLogger.Info("SQL",
            zap.Duration("elapsed", elapsed),
            zap.Int64("rows", rows),
            zap.String("sql", sql),
        )
    }
}

func InitDB() error {
    cfg := config.GetConfig()
    var dialector gorm.Dialector
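Note: a quick illustration of the Trace behaviour above. This is a sketch only; gormLogger is unexported, so it would only compile inside the same package, and the 300 ms timing and query text are made-up values:

    gl := &gormLogger{ZapLogger: util.DBLogger, LogLevel: logger.Warn}
    // A query that took about 300 ms with no error: over the 200 ms threshold
    // and LogLevel >= Warn, so it is reported as "SLOW SQL".
    gl.Trace(context.Background(), time.Now().Add(-300*time.Millisecond),
        func() (string, int64) { return "SELECT * FROM images WHERE date >= ?", 42 }, nil)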
@@ -31,8 +91,25 @@ func InitDB() error {
        return fmt.Errorf("unsupported db type: %s", cfg.DB.Type)
    }

    gormLogLevel := logger.Info
    switch cfg.Log.DBLogLevel {
    case "debug":
        gormLogLevel = logger.Info // GORM's Info level already logs every SQL statement
    case "info":
        gormLogLevel = logger.Info
    case "warn":
        gormLogLevel = logger.Warn
    case "error":
        gormLogLevel = logger.Error
    case "silent":
        gormLogLevel = logger.Silent
    }

    gormConfig := &gorm.Config{
-       Logger: logger.Default.LogMode(logger.Info),
        Logger: &gormLogger{
            ZapLogger: util.DBLogger,
            LogLevel:  gormLogLevel,
        },
        DisableForeignKeyConstraintWhenMigrating: true,
    }
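Note: the gorm.Open call that consumes gormConfig falls outside the shown hunks; the wiring presumably continues along the lines of the sketch below (an assumption based on the surrounding code, not part of this diff):

    db, err := gorm.Open(dialector, gormConfig)
    if err != nil {
        return fmt.Errorf("failed to open database: %w", err)
    }
    DB = db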
@@ -55,17 +55,22 @@ func NewFetcher() *Fetcher {
func (f *Fetcher) Fetch(ctx context.Context, n int) error {
    util.Logger.Info("Starting fetch task", zap.Int("n", n))
    url := fmt.Sprintf("%s?format=js&idx=0&n=%d&uhd=1&mkt=%s", config.BingAPIBase, n, config.BingMkt)
    util.Logger.Debug("Requesting Bing API", zap.String("url", url))
    resp, err := f.httpClient.Get(url)
    if err != nil {
        util.Logger.Error("Failed to request Bing API", zap.Error(err))
        return err
    }
    defer resp.Body.Close()

    var bingResp BingResponse
    if err := json.NewDecoder(resp.Body).Decode(&bingResp); err != nil {
        util.Logger.Error("Failed to decode Bing API response", zap.Error(err))
        return err
    }

    util.Logger.Info("Fetched images from Bing", zap.Int("count", len(bingResp.Images)))

    for _, bingImg := range bingResp.Images {
        if err := f.processImage(ctx, bingImg); err != nil {
            util.Logger.Error("Failed to process image", zap.String("date", bingImg.Enddate), zap.Error(err))
@@ -93,12 +98,14 @@ func (f *Fetcher) processImage(ctx context.Context, bingImg BingImage) error {

    imgData, err := f.downloadImage(imgURL)
    if err != nil {
        util.Logger.Error("Failed to download image", zap.String("url", imgURL), zap.Error(err))
        return err
    }

    // Decode the image for resizing
    srcImg, _, err := image.Decode(bytes.NewReader(imgData))
    if err != nil {
        util.Logger.Error("Failed to decode image data", zap.Error(err))
        return err
    }
@@ -115,6 +122,7 @@ func (f *Fetcher) processImage(ctx context.Context, bingImg BingImage) error {
        Columns:   []clause.Column{{Name: "date"}},
        DoNothing: true,
    }).Create(&dbImg).Error; err != nil {
        util.Logger.Error("Failed to create image record", zap.Error(err))
        return err
    }
@@ -122,6 +130,7 @@ func (f *Fetcher) processImage(ctx context.Context, bingImg BingImage) error {
    if dbImg.ID == 0 {
        var existing model.Image
        if err := repo.DB.Where("date = ?", dateStr).First(&existing).Error; err != nil {
            util.Logger.Error("Failed to query existing image record after conflict", zap.Error(err))
            return err
        }
        dbImg = existing
@@ -25,6 +25,7 @@ func CleanupOldImages(ctx context.Context) error {

    var images []model.Image
    if err := repo.DB.Where("date < ?", threshold).Preload("Variants").Find(&images).Error; err != nil {
        util.Logger.Error("Failed to query old images for cleanup", zap.Error(err))
        return err
    }
@@ -2,14 +2,61 @@ package util

import (
    "os"
    "path/filepath"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
    "gopkg.in/natefinch/lumberjack.v2"
)

var Logger *zap.Logger
var DBLogger *zap.Logger

-func InitLogger(level string) {
// LogConfig defines the log configuration as an interface, to avoid an import cycle
type LogConfig interface {
    GetLevel() string
    GetFilename() string
    GetDBFilename() string
    GetMaxSize() int
    GetMaxBackups() int
    GetMaxAge() int
    GetCompress() bool
    GetLogConsole() bool
    GetShowDBLog() bool
    GetDBLogLevel() string
}

func InitLogger(cfg LogConfig) {
    // Make sure the log directories exist
    if cfg.GetFilename() != "" {
        _ = os.MkdirAll(filepath.Dir(cfg.GetFilename()), 0755)
    }
    if cfg.GetDBFilename() != "" {
        _ = os.MkdirAll(filepath.Dir(cfg.GetDBFilename()), 0755)
    }

    Logger = createZapLogger(
        cfg.GetLevel(),
        cfg.GetFilename(),
        cfg.GetMaxSize(),
        cfg.GetMaxBackups(),
        cfg.GetMaxAge(),
        cfg.GetCompress(),
        cfg.GetLogConsole(),
    )

    DBLogger = createZapLogger(
        cfg.GetDBLogLevel(),
        cfg.GetDBFilename(),
        cfg.GetMaxSize(),
        cfg.GetMaxBackups(),
        cfg.GetMaxAge(),
        cfg.GetCompress(),
        cfg.GetShowDBLog(),
    )
}

func createZapLogger(level, filename string, maxSize, maxBackups, maxAge int, compress, logConsole bool) *zap.Logger {
    var zapLevel zapcore.Level
    switch level {
    case "debug":
@@ -28,11 +75,33 @@ func InitLogger(level string) {
    encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
    encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder

-   core := zapcore.NewCore(
-       zapcore.NewConsoleEncoder(encoderConfig),
-       zapcore.AddSync(os.Stdout),
-       zapLevel,
-   )
-   Logger = zap.New(core, zap.AddCaller())
    var cores []zapcore.Core

    // File output
    if filename != "" {
        w := zapcore.AddSync(&lumberjack.Logger{
            Filename:   filename,
            MaxSize:    maxSize,
            MaxBackups: maxBackups,
            MaxAge:     maxAge,
            Compress:   compress,
        })
        cores = append(cores, zapcore.NewCore(
            zapcore.NewConsoleEncoder(encoderConfig),
            w,
            zapLevel,
        ))
    }

    // Console output
    if logConsole {
        cores = append(cores, zapcore.NewCore(
            zapcore.NewConsoleEncoder(encoderConfig),
            zapcore.AddSync(os.Stdout),
            zapLevel,
        ))
    }

    core := zapcore.NewTee(cores...)
    return zap.New(core, zap.AddCaller())
}
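Note: a minimal usage sketch of the reworked API, mirroring what the router's Init function now does (the package main wrapper is an assumption for illustration). One caveat visible in the code above: if a logger has an empty filename and its console flag is false, zapcore.NewTee receives no cores and returns a no-op core, so at least one sink should be configured:

package main

import (
    "BingPaper/internal/config"
    "BingPaper/internal/util"

    "go.uber.org/zap"
)

func main() {
    // Assumes config.Init has already populated the global configuration.
    cfg := config.GetConfig()
    util.InitLogger(cfg.Log)

    util.Logger.Info("business event", zap.String("component", "router"))
    // Written to the database log file; mirrored to stdout only when show_db_log is true.
    util.DBLogger.Warn("database event")
}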