diff --git a/README.md b/README.md index 5f9cb13..f981192 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,99 @@ # loggeradapter -Golang 日志适配器,实现了日志了轮转、备份和压缩,可与常见日志组件如:zap logger、logrus 等结合。 + +Golang 日志适配器,实现了日志轮转、备份和压缩归档、及按指定策略删除备份和归档文件的功能,可与常见日志组件如:zap logger、logrus 等结合。 + +## 使用示例 + +- 导入包 + +```go + go get -u github.com/fsjobwd/loggeradapter +``` + +- 与 zap logger 结合 + +```go + zapcore.AddSync(loggeradapter.New(loggeradapter.Config{ + Filename: "logs/log.log", + Rotation: "50mb", + Backup: "1w", + Archive: "1M", + })) +``` + +- 与 logrus 结合 + +```go + logrus.SetOutput(loggeradapter.New(loggeradapter.Config{ + Filename: "logs/log.log", + Rotation: "50mb", + Backup: "1w", + Archive: "1M", + })) +``` + +## 参数说明 + +- Filename + + 指定日志输出文件路径及名称,不提供时默认为当前目录下 `./logs/log.log`。 + + 最新日志始终输出到该文件中,当该文件内容达到指定的最大文件大小或到达指定时间时(由参数 `Rotation` 指定的轮转策略确定), + 将被重命名为备份文件(备份文件名称由参数 `Backup` 确定),然后生成一个相同名称的文件,并将最新日志输出到该新文件中。 + +- Rotation + + 文件轮转备份策略,支持以下格式的配置: + + (1). `b|byte|kb|kilobyte|mb|megabyte|gb|gigabyte|tb|terabyte` + (2). `y|year|M|month|mo|mon|w|week|d|day|h|hour|m|minute|min|s|second` + (3). 
`annually|monthly|weekly|daily|hourly|minutely|secondly` + + _解释_: 以上三种配置方式中,`(1)` 和 `(2)` 配置必须前面有数字,如:10mb、2year,且配置值不区分大小写。其中: + `b|byte|kb|kilobyte|mb|megabyte|gb|gigabyte|tb|terabyte` + 为按照文件大小轮转备份日志,如:10b、10byte、10Byte、10BYTE 都为文件大小达到 10 字节时轮转备份新日志文件,最大支持到 TB 级别。 + 此时日志备份文件名称为指定的 `Filename` 连接上 `2006-01-02T15-04-05.000` 格式,如:`logs/log-2024-01-01T10-10-10.123.log`。 + `y|year|M|month|mo|mon|w|week|d|day|h|hour|m|minute|min|s|second` + 为按照时间轮转备份日志,如:1y、1year、1YEAR、1Year 都为 1 年生成一个新的备份文件,此时备份文件名称类似: `logs/log-2024.log`。 + 比如指定为 1h、1hour、1HOUR、1Hour,则 1 小时生成一个新的备份文件,此时备份文件名称类似:`logs/log-2024-01-01T10.log`。 + `annually|monthly|weekly|daily|hourly|minutely|secondly` + 为按照时间周期轮转备份日志,如设置为 `annually` 或 `Annually` 时,则每 1 年生成一个新的备份文件,此时和 1y、1year、1YEAR、1Year 具有相同的作用,其生成的文件名称类似:`logs/log-2024.log`。 + 若设置为 `monthly` 或 `Monthly` 时,则每 1 月生成一个新的备份文件,此时和 1M、1month、1mo、1mon 具有相同的作用,其生成的文件名称类似:`logs/log-2024-01.log`。 + _注意_:M 为月,m 为分钟!`M|month|mo|mon` 都为月,`m|minute|min` 都为分钟! + +- Backup + + 备份文件保留策略,支持以下格式的配置: + + (1). `y|year|M|month|mo|mon|w|week|d|day|h|hour|m|minute|min|s|second` + (2). `number` + + _解释_: 以上两种配置方式中,`(1)` 配置和 `Rotation` 的 `y|year|M|month|mo|mon|w|week|d|day|h|hour|m|minute|min|s|second` 配置方式相同。 + 意思为按照周期保留备份文件,达到该时长的备份文件将被压缩并归档(若指定了压缩归档策略),同时删除旧备份文件。 + 如指定为:1w、1week、1WEEK、1Week,则备份文件会保留 1 周,然后压缩并归档,同时删除旧备份文件。 + 以上配置 `(2)` 为指定单个数字,如设置为:10,则表示备份文件达到 10 个时,将压缩并归档,同时删除旧备份文件。 + +- Archive + + 压缩归档策略,支持以下格式的配置: + + (1). `y|year|M|month|mo|mon|w|week|d|day|h|hour|m|minute|min|s|second` + (2). `number` + + _解释_: 以上两种配置方式和 `Backup` 相同。为 `y|year|M|month|mo|mon|w|week|d|day|h|hour|m|minute|min|s|second` 时按照周期保留压缩归档文件,达到该时长的压缩归档文件将被删除。 + 如指定为:`1M、1month、1mo、1mon`,则压缩归档文件会保留 1 个月,然后删除 1 个月之前的所有压缩归档文件。 + 若指定为数字 `number` 时,则会最多保留该数量的压缩归档文件,多余的压缩归档文件将被删除。如指定为 10,则最多保留 10 个最新的压缩归档文件,其余的压缩归档文件将被删除。 + + _注意_:当该参数为空时,则不压缩归档文件,只会按照保留策略删除符合条件的旧文件! 
+ +## 注意事项 + +若参数 `Rotation`、`Backup`、`Archive` 三个参数都为空时,则日志文件将不会轮转备份,也不会进行压缩归档,日志会持续不断的输出到指定日志文件中。 + +## 编写初衷 + +在实际项目开发中,使用过 zap logger、logrus、glog、zerolog 等日志组件,发现日志组件的日志轮转备份功能不足, 一般都是通过集成 `gopkg.in/natefinch/lumberjack.v2` 做日志轮转备份,而该组件只支持按照大小轮转备份,实际开发当中有按照时间周期轮转备份的需求,就需要自己编写实现,且配置不够灵活,无法满足实际需求,故自己做了封装实现,以便在其他不同项目中能够直接使用。 + +## 其他 + +欢迎各位大佬提意见、Issues、PR!🤝👊🫶 diff --git a/archiver.go b/archiver.go new file mode 100644 index 0000000..9fc38e5 --- /dev/null +++ b/archiver.go @@ -0,0 +1,401 @@ +package loggeradapter + +import ( + "archive/tar" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" +) + +const ( + defaultArchiveTimeFormat = "2006-01-02T15-04-05" + defaultArchiveSuffix = ".gz" +) + +type archiver struct { + backupValue, archiveValue int + backupUnit, archiveUnit string + + backupTimeFormat string + isBackupNumber, isArchiveNumber bool + backupDuration, archiveDuration time.Duration + + filename string + millCh chan bool + startArchive sync.Once +} + +func newArchiver(cfg Config) *archiver { + if cfg.Backup == "" || cfg.Archive == "" { + return nil + } + + backupValue, backupUnit, err := ParseExpression(cfg.Backup) + if err != nil { + panic(fmt.Sprintf("Parse backup expression failed. error: %v", err)) + } + + archiveValue, archiveUnit, err := ParseExpression(cfg.Archive) + if err != nil { + panic(fmt.Sprintf("Parse archive expression failed. 
error: %v", err)) + } + + rp := &archiver{ + filename: cfg.Filename, + backupValue: backupValue, + backupUnit: backupUnit, + archiveValue: archiveValue, + archiveUnit: archiveUnit, + backupTimeFormat: cfg.timeFormat, + } + + if backupUnit == "" && backupValue > 0 { + rp.isBackupNumber = true + } else { + rp.setBackupDuration() + } + + if archiveUnit == "" && archiveValue > 0 { + rp.isArchiveNumber = true + } else { + rp.setArchiveDuration() + } + + return rp +} + +func (a *archiver) setBackupDuration() { + if !IsDuration(a.backupUnit) { + return + } + + if IsYear(a.backupUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.backupValue*365*24*60*60)) + a.backupDuration = du + return + } + if IsMonth(a.backupUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.backupValue*30*24*60*60)) + a.backupDuration = du + return + } + if IsWeek(a.backupUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.backupValue*7*24*60*60)) + a.backupDuration = du + return + } + if IsDay(a.backupUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.backupValue*24*60*60)) + a.backupDuration = du + return + } + if IsHour(a.backupUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.backupValue*60*60)) + a.backupDuration = du + return + } + if IsMinute(a.backupUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.backupValue*60)) + a.backupDuration = du + return + } + if IsSecond(a.backupUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.backupValue)) + a.backupDuration = du + return + } +} + +func (a *archiver) setArchiveDuration() { + if !IsDuration(a.archiveUnit) { + return + } + + if IsYear(a.archiveUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.archiveValue*365*24*60*60)) + a.archiveDuration = du + return + } + if IsMonth(a.archiveUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.archiveValue*30*24*60*60)) + a.archiveDuration = du + return + } + if IsWeek(a.archiveUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", 
a.archiveValue*7*24*60*60)) + a.archiveDuration = du + return + } + if IsDay(a.archiveUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.archiveValue*24*60*60)) + a.archiveDuration = du + return + } + if IsHour(a.archiveUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.archiveValue*60*60)) + a.archiveDuration = du + return + } + if IsMinute(a.archiveUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.archiveValue*60)) + a.archiveDuration = du + return + } + if IsSecond(a.archiveUnit) { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", a.archiveValue)) + a.archiveDuration = du + return + } +} + +func (a *archiver) archive() { + a.startArchive.Do(func() { + a.millCh = make(chan bool, 1) + + go func() { + for range a.millCh { + if err := a.runArchive(); err != nil { + panic(fmt.Sprintf("Archive logs failed, error: %v", err)) + } + } + }() + }) + + select { + case a.millCh <- true: + default: + } +} + +func (a *archiver) runArchive() error { + logFiles, err := a.filterBackupFiles() + if err != nil { + return err + } + + if len(logFiles) == 0 { + return nil + } + + dir := filepath.Dir(a.filename) + gzipFilename := a.getGzipFilename() + + err = archiveCompress(gzipFilename, func(w *tar.Writer) error { + closeFile := func(f *os.File) error { + return f.Close() + } + + for _, f := range logFiles { + filename := filepath.Join(dir, f.Name()) + + logFile, err := os.Open(filename) + if err != nil { + return err + } + + header := &tar.Header{ + Name: f.Name(), + Mode: int64(f.Mode()), + Size: f.Size(), + } + + if err = w.WriteHeader(header); err != nil { + _ = closeFile(logFile) + return err + } + + if _, err = io.Copy(w, logFile); err != nil { + _ = closeFile(logFile) + return err + } + + if err = closeFile(logFile); err != nil { + return err + } + + _ = os.Remove(filename) + } + return nil + }) + if err != nil { + return err + } + + gzipFiles, _ := a.filterGzipFiles() + for _, f := range gzipFiles { + _ = os.Remove(filepath.Join(dir, f.Name())) + } + + 
return nil +} + +func archiveCompress(gzipFilename string, r func(w *tar.Writer) error) error { + gzipFile, err := openFile(gzipFilename) + if err != nil { + return err + } + defer func() { + if err := gzipFile.Close(); err != nil { + _ = os.Remove(gzipFilename) + } + }() + + gzipWriter := gzip.NewWriter(gzipFile) + defer func() { + //gzipWriter.Flush() + if err := gzipWriter.Close(); err != nil { + _ = os.Remove(gzipFilename) + } + }() + + tarWriter := tar.NewWriter(gzipWriter) + defer func() { + //tarWriter.Flush() + if err := tarWriter.Close(); err != nil { + _ = os.Remove(gzipFilename) + } + }() + + return r(tarWriter) +} + +func (a *archiver) filterBackupFiles() ([]logInfo, error) { + files, err := os.ReadDir(filepath.Dir(a.filename)) + if err != nil { + return nil, fmt.Errorf("can't read log file directory: %s", err) + } + var logFiles []logInfo + + prefix, ext := prefixAndExt(a.filename) + + for _, f := range files { + if f.IsDir() { + continue + } + + fileInfo, err := f.Info() + if err != nil { + continue + } + + // 根据备份策略确定要压缩那些文件 + if t, err := a.timeFromLogFilename(f.Name(), prefix, ext); err == nil { + logFiles = append(logFiles, logInfo{t, fileInfo}) + } + } + + sort.Sort(byFormatTime(logFiles)) + + if a.isBackupNumber { + if len(logFiles) >= a.backupValue { + return logFiles[:a.backupValue], nil + } + return nil, nil + } + + var filteredLogFiles []logInfo + now := time.Now() + end := now.Add(-a.backupDuration) + + for _, f := range logFiles { + if f.timestamp.Before(end) { + filteredLogFiles = append(filteredLogFiles, f) + } + } + + return filteredLogFiles, nil +} + +func (a *archiver) filterGzipFiles() ([]logInfo, error) { + files, err := os.ReadDir(filepath.Dir(a.filename)) + if err != nil { + return nil, fmt.Errorf("can't read log file directory: %s", err) + } + var gzipFiles []logInfo + + for _, f := range files { + if f.IsDir() { + continue + } + + fileInfo, err := f.Info() + if err != nil { + continue + } + + if t, err := 
a.timeFromGzipFilename(fileInfo.Name()); err == nil { + gzipFiles = append(gzipFiles, logInfo{t, fileInfo}) + } + } + + sort.Sort(byFormatTime(gzipFiles)) + + if a.isArchiveNumber { + if len(gzipFiles) >= a.archiveValue { + return gzipFiles[a.archiveValue:], nil + } + return nil, nil + } + + var filteredGzipFiles []logInfo + now := time.Now() + end := now.Add(-a.archiveDuration) + + for _, f := range gzipFiles { + if f.timestamp.Before(end) { + filteredGzipFiles = append(filteredGzipFiles, f) + } + } + + return filteredGzipFiles, nil +} + +func (a *archiver) getGzipFilename() string { + return filepath.Join(filepath.Dir(a.filename), + fmt.Sprintf("%s%s", time.Now().Format(defaultArchiveTimeFormat), defaultArchiveSuffix)) +} + +func (a *archiver) timeFromLogFilename(filename, prefix, ext string) (time.Time, error) { + if !strings.HasPrefix(filename, prefix+"-") { + return time.Time{}, errors.New("mismatched prefix") + } + if !strings.HasSuffix(filename, ext) { + return time.Time{}, errors.New("mismatched extension") + } + ts := filename[len(prefix+"-") : len(filename)-len(ext)] + return time.Parse(a.backupTimeFormat, ts) +} + +func (a *archiver) timeFromGzipFilename(filename string) (time.Time, error) { + if !strings.HasSuffix(filename, defaultArchiveSuffix) { + return time.Time{}, errors.New("mismatched extension") + } + ts := filename[:len(filename)-len(defaultArchiveSuffix)] + return time.Parse(defaultArchiveTimeFormat, ts) +} + +type logInfo struct { + timestamp time.Time + os.FileInfo +} + +type byFormatTime []logInfo + +func (b byFormatTime) Less(i, j int) bool { + return b[i].timestamp.After(b[j].timestamp) +} + +func (b byFormatTime) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b byFormatTime) Len() int { + return len(b) +} diff --git a/chown.go b/chown.go new file mode 100644 index 0000000..1f7a512 --- /dev/null +++ b/chown.go @@ -0,0 +1,9 @@ +//go:build !linux + +package loggeradapter + +import "os" + +func chown(_ string, _ os.FileInfo) error { 
+ return nil +} diff --git a/chown_linux.go b/chown_linux.go new file mode 100644 index 0000000..36059c8 --- /dev/null +++ b/chown_linux.go @@ -0,0 +1,18 @@ +package loggeradapter + +import ( + "os" + "syscall" +) + +var osChown = os.Chown + +func chown(name string, info os.FileInfo) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode()) + if err != nil { + return err + } + f.Close() + stat := info.Sys().(*syscall.Stat_t) + return osChown(name, int(stat.Uid), int(stat.Gid)) +} diff --git a/parser.go b/parser.go new file mode 100644 index 0000000..cd56c8f --- /dev/null +++ b/parser.go @@ -0,0 +1,219 @@ +package loggeradapter + +import ( + "errors" + "fmt" + "regexp" + "strconv" +) + +const ( + regexpDataDuration = `(?i)^(\d+)(y|year|M|month|mo|mon|w|week|d|day|h|hour|m|minute|min|s|second)$` + regexpDataFileSize = `(?i)^(\d+)(b|byte|kb|kilobyte|mb|megabyte|gb|gigabyte|tb|terabyte)$` // |pb|petabyte|zb|zettabyte + + regexpDuration = `(?i)^(y|year|M|month|mo|mon|w|week|d|day|h|hour|m|minute|min|s|second)$` + regexpFileSize = `(?i)^(b|byte|kb|kiloByte|mb|megabyte|gb|gigabyte|tb|terabyte)$` // |pb|petabyte|zb|zettabyte + + regexpFixedInterval = `(?i)^(annually|monthly|weekly|daily|hourly|minutely|secondly)$` + + regexpYear = `(?i)^(y|year)$` + regexpMonth = `(?i)^(month|mo|mon)$` + regexpMonthM = `^(M)$` + regexpWeek = `(?i)^(w|week)$` + regexpDay = `(?i)^(d|day)$` + regexpHour = `(?i)^(h|hour)$` + regexpMinute = `(?i)^(minute|min)$` + regexpMinuteM = `^(m)$` + regexpSecond = `(?i)^(s|second)$` + + regexpByte = `(?i)^(b|byte)$` + regexpKB = `(?i)^(kb|kilobyte)$` + regexpMB = `(?i)^(mb|megabyte)$` + regexpGB = `(?i)^(gb|gigabyte)$` + regexpTB = `(?i)^(tb|terabyte)$` + + regexpFixedAnnually = `(?i)^(annually)$` + regexpFixedMonthly = `(?i)^(monthly)$` + regexpFixedWeekly = `(?i)^(weekly)$` + regexpFixedDaily = `(?i)^(daily)$` + regexpFixedHourly = `(?i)^(hourly)$` + regexpFixedMinutely = `(?i)^(minutely)$` + regexpFixedSecondly = 
`(?i)^(secondly)$` +) + +// ParseExpression match and parse expression +// +// rotation: [y/M/w/d/h/m/s] / [b/kb/mb/gb/tb] / [annually|monthly|weekly|daily|hourly|minutely|secondly] +// +// retain/archive: [y/M/w/d/h/m/s] / [] +func ParseExpression(expression string) (int, string, error) { + if IsFixedInterval(expression) { + return parseFixedInterval(expression) + } + + matches, err := parseExpression(regexpDataDuration, expression) + if err == nil { + return parseMatches(matches) + } + + matches, err = parseExpression(regexpDataFileSize, expression) + if err == nil { + return parseMatches(matches) + } + + var v int + v, err = strconv.Atoi(expression) + if err != nil { + return 0, "", fmt.Errorf("invalid expression: %s", expression) + } + + return v, "", nil +} + +func parseExpression(regexpPattern string, expression string) ([]string, error) { + regex, err := regexp.Compile(regexpPattern) + if err != nil { + return nil, fmt.Errorf("invalid regexp: %s", regexpPattern) + } + + matches := regex.FindStringSubmatch(expression) + if len(matches) != 3 { + return nil, fmt.Errorf("invalid expression: %s", expression) + } + + return matches, nil +} + +func parseMatches(matches []string) (int, string, error) { + if len(matches) != 3 { + return 0, "", errors.New("not enough matches") + } + + vs := matches[1] + unit := matches[2] + + v, _err := strconv.Atoi(vs) + if _err != nil { + return 0, "", fmt.Errorf("invalid expression value: %s", vs) + } + + return v, unit, nil +} + +func parseFixedInterval(expression string) (int, string, error) { + if IsFixedAnnually(expression) { + return 1, "y", nil + } + if IsFixedMonthly(expression) { + return 1, "M", nil + } + if IsFixedWeekly(expression) { + return 1, "w", nil + } + if IsFixedDaily(expression) { + return 1, "d", nil + } + if IsFixedHourly(expression) { + return 1, "h", nil + } + if IsFixedMinutely(expression) { + return 1, "m", nil + } + if IsFixedSecondly(expression) { + return 1, "s", nil + } + return 0, "", 
fmt.Errorf("invalid expression: %s", expression) +} + +func Match(pattern string, s string) bool { + matched, _ := regexp.MatchString(pattern, s) + return matched +} + +func IsFixedInterval(unit string) bool { + return Match(regexpFixedInterval, unit) +} + +func IsDuration(unit string) bool { + return Match(regexpDuration, unit) +} + +func IsFileSize(unit string) bool { + return Match(regexpFileSize, unit) +} + +func IsYear(unit string) bool { + return Match(regexpYear, unit) +} + +func IsMonth(unit string) bool { + return Match(regexpMonth, unit) || Match(regexpMonthM, unit) +} + +func IsWeek(unit string) bool { + return Match(regexpWeek, unit) +} + +func IsDay(unit string) bool { + return Match(regexpDay, unit) +} + +func IsHour(unit string) bool { + return Match(regexpHour, unit) +} + +func IsMinute(unit string) bool { + return Match(regexpMinute, unit) || Match(regexpMinuteM, unit) +} + +func IsSecond(unit string) bool { + return Match(regexpSecond, unit) +} + +func IsByte(unit string) bool { + return Match(regexpByte, unit) +} + +func IsKB(unit string) bool { + return Match(regexpKB, unit) +} + +func IsMB(unit string) bool { + return Match(regexpMB, unit) +} + +func IsGB(unit string) bool { + return Match(regexpGB, unit) +} + +func IsTB(unit string) bool { + return Match(regexpTB, unit) +} + +func IsFixedAnnually(unit string) bool { + return Match(regexpFixedAnnually, unit) +} + +func IsFixedMonthly(unit string) bool { + return Match(regexpFixedMonthly, unit) +} + +func IsFixedWeekly(unit string) bool { + return Match(regexpFixedWeekly, unit) +} + +func IsFixedDaily(unit string) bool { + return Match(regexpFixedDaily, unit) +} + +func IsFixedHourly(unit string) bool { + return Match(regexpFixedHourly, unit) +} + +func IsFixedMinutely(unit string) bool { + return Match(regexpFixedMinutely, unit) +} + +func IsFixedSecondly(unit string) bool { + return Match(regexpFixedSecondly, unit) +} diff --git a/parser_test.go b/parser_test.go new file mode 100644 index 
0000000..b2fa02d --- /dev/null +++ b/parser_test.go @@ -0,0 +1,57 @@ +package loggeradapter + +import ( + "testing" +) + +func TestParseExpression(t *testing.T) { + for _, rotation := range testStrings { + v, unit, err := ParseExpression(rotation) + if err != nil { + t.Error("error:", err) + continue + } + + t.Logf("匹配成功: %s,值: %d,单位: %s, isDuration: %v, isFileSize: %v\n", + rotation, v, unit, IsDuration(unit), IsFileSize(unit)) + } +} + +var testStrings = []string{ + "1y", "2Y", "3year", "4Year", "5YEAR", // 匹配 + "1M", "2mo", "3mon", "4month", "5Month", "6MONTH", // 匹配 + "1w", "2W", "3week", "4Week", "5WEEK", // 匹配 + "1d", "2D", "3day", "4Day", "5DAY", // 匹配 + "1h", "2H", "3hour", "4Hour", "5HOUR", // 匹配 + "1m", "2minute", "3Minute", "4MINUTE", "5min", "6Min", // 匹配 + "1second", "2Second", "3SECOND", "4s", "5S", // 匹配 + + "y", "Y", "year", "Year", "YEAR", // 不匹配 + "M", "mo", "mon", "month", "Month", "MONTH", // 不匹配 + "w", "W", "week", "Week", "WEEK", // 不匹配 + "d", "D", "day", "Day", "DAY", // 不匹配 + "h", "H", "hour", "Hour", "HOUR", // 不匹配 + "m", "minute", "Minute", "MINUTE", "min", "Min", // 不匹配 + "second", "Second", "SECOND", "s", "S", // 不匹配 + "1invalid", "2yday", "4mmonth", // 不匹配 + + "1b", "2B", "3byte", "4Byte", "5BYTE", // 匹配 + "1kb", "2KB", "3kilobyte", "4KiloByte", "5KILOBYTE", "6Kb", // 匹配 + "1mb", "2MB", "3Mb", "4megaByte", "5MegaByte", "6MEGABYTE", // 匹配 + "1gb", "2GB", "3Gb", "4gigaByte", "5GigaByte", "6GIGABYTE", // 匹配 + "1tb", "2TB", "3Tb", "4teraByte", "5TeraByte", "6TERABYTE", // 匹配 + "1pb", "2PB", "3Pb", "4petaByte", "5PetaByte", "6PETABYTE", // 匹配 + "1zb", "2ZB", "3Zb", "4zettaByte", "5ZettaByte", "6ZETTABYTE", // 匹配 + + "b", "B", "byte", "Byte", "BYTE", // 匹配 + "kb", "KB", "kilobyte", "KiloByte", "KILOBYTE", "Kb", // 不匹配 + "mb", "MB", "Mb", "megaByte", "MegaByte", "MEGABYTE", // 不匹配 + "gb", "GB", "Gb", "gigaByte", "GigaByte", "GIGABYTE", // 不匹配 + "tb", "TB", "Tb", "teraByte", "TeraByte", "TERABYTE", // 不匹配 + "pb", "PB", "Pb", "petaByte", 
"PetaByte", "PETABYTE", // 不匹配 + "zb", "ZB", "Zb", "zettaByte", "ZettaByte", "ZETTABYTE", // 不匹配 + + "1year", "1Month", "1Week", "1Day", "1Hour", "1Minute", "1Second", // 匹配 + "Annually", "Monthly", "Weekly", "Daily", "Hourly", "Minutely", "Secondly", // 匹配 + "year", "Month", "Week", "Day", "Hour", "Minute", "Second", // 不匹配 +} diff --git a/rotator.go b/rotator.go new file mode 100644 index 0000000..eee9b97 --- /dev/null +++ b/rotator.go @@ -0,0 +1,245 @@ +package loggeradapter + +import ( + "fmt" + "os" + "path/filepath" + "sync" + "time" +) + +const ( + defaultTimeFormat = "2006-01-02T15-04-05.000" + defaultFilename = "./logs/log.log" + defaultMaxSizeByte = 100 * 1024 * 1024 // 100MB +) + +type rotator struct { + v int + unit string + + isDuration bool + isFileSize bool + + isYear bool + isMonth bool + isWeek bool + isDay bool + isHour bool + isMinute bool + isSecond bool + + timeFormat string + nextTime time.Time + + filename string + file *os.File + maxSizeByte int64 + fileSizeByte int64 + mu sync.Mutex +} + +func newRotator(cfg Config) *rotator { + v, unit, err := ParseExpression(cfg.Rotation) + if err != nil { + panic(fmt.Sprintf("Parse rotation expression failed. 
error: %v", err)) + } + + r := &rotator{ + v: v, + unit: unit, + filename: cfg.Filename, + isDuration: IsDuration(unit), + isFileSize: IsFileSize(unit), + isYear: IsYear(unit), + isMonth: IsMonth(unit), + isWeek: IsWeek(unit), + isDay: IsDay(unit), + isHour: IsHour(unit), + isMinute: IsMinute(unit), + isSecond: IsSecond(unit), + } + + r.setTimeFormat() + r.setNextTime() + r.setMaxSize() + + return r +} + +func (r *rotator) setTimeFormat() { + if r.isFileSize { + r.timeFormat = defaultTimeFormat + return + } + if r.isYear { + r.timeFormat = "2006" + return + } + if r.isMonth { + r.timeFormat = "2006-01" + return + } + if r.isWeek || r.isDay { + r.timeFormat = "2006-01-02" + return + } + if r.isHour { + r.timeFormat = "2006-01-02T15" + return + } + if r.isMinute { + r.timeFormat = "2006-01-02T15-04" + return + } + if r.isSecond { + r.timeFormat = "2006-01-02T15-04-05" + return + } +} + +func (r *rotator) setNextTime() { + if r.isFileSize { + return + } + + now := time.Now() + + if r.isYear { + r.nextTime = now.AddDate(r.v, 0, 0) + return + } + if r.isMonth { + r.nextTime = now.AddDate(0, r.v, 0) + return + } + if r.isWeek { + r.nextTime = now.AddDate(0, 0, r.v*7) + return + } + if r.isDay { + r.nextTime = now.AddDate(0, 0, r.v) + return + } + if r.isHour { + du, _ := time.ParseDuration(fmt.Sprintf("%dh", r.v)) + r.nextTime = now.Add(du) + return + } + if r.isMinute { + du, _ := time.ParseDuration(fmt.Sprintf("%dm", r.v)) + r.nextTime = now.Add(du) + return + } + if r.isSecond { + du, _ := time.ParseDuration(fmt.Sprintf("%ds", r.v)) + r.nextTime = now.Add(du) + return + } +} + +func (r *rotator) setMaxSize() { + if r.isDuration { + r.maxSizeByte = defaultMaxSizeByte + return + } + + if IsByte(r.unit) { + r.maxSizeByte = int64(r.v) + return + } + if IsKB(r.unit) { + r.maxSizeByte = int64(r.v * 1024) + return + } + if IsMB(r.unit) { + r.maxSizeByte = int64(r.v * 1024 * 1024) + return + } + if IsGB(r.unit) { + r.maxSizeByte = int64(r.v * 1024 * 1024 * 1024) + return + } 
+ if IsTB(r.unit) { + r.maxSizeByte = int64(r.v * 1024 * 1024 * 1024 * 1024) + return + } +} + +func (r *rotator) getNewFilename() string { + if r.filename == "" { + r.filename = defaultFilename + } + + suffix := time.Now().Format(r.timeFormat) + + dir := filepath.Dir(r.filename) + prefix, ext := prefixAndExt(r.filename) + + return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, suffix, ext)) +} + +func (r *rotator) openNewFile() error { + err := os.MkdirAll(filepath.Dir(r.filename), 0755) + if err != nil { + return fmt.Errorf("can't make directories for new logfile: %s", err) + } + + var info os.FileInfo + info, err = os.Stat(r.filename) + if err == nil { + // Copy the mode off the old logfile. + // move the existing file + newFilename := r.getNewFilename() + if err = os.Rename(r.filename, newFilename); err != nil { + return fmt.Errorf("can't rename log file: %s", err) + } + // this is a no-op anywhere but linux + if err = chown(r.filename, info); err != nil { + return err + } + } + + r.file, err = openFile(r.filename) + if err != nil { + return fmt.Errorf("can't open new logfile: %s", err) + } + + r.setNextTime() + r.fileSizeByte = 0 + return nil +} + +func (r *rotator) close() error { + if r.file == nil { + return nil + } + + err := r.file.Close() + r.file = nil + return err +} + +func (r *rotator) rotateWrite(content []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + + writeLen := int64(len(content)) + + if (r.isDuration && time.Now().After(r.nextTime)) || + (r.isFileSize && r.fileSizeByte+writeLen >= r.maxSizeByte) { + if err = r.close(); err != nil { + return 0, err + } + } + + if r.file == nil { + if err = r.openNewFile(); err != nil { + return 0, err + } + } + + n, err = r.file.Write(content) + r.fileSizeByte += int64(n) + return n, err +} diff --git a/writer.go b/writer.go new file mode 100644 index 0000000..51616f8 --- /dev/null +++ b/writer.go @@ -0,0 +1,112 @@ +package loggeradapter + +import ( + "fmt" + "io" + "os" + "path/filepath" 
+) + +type LoggerWriter interface { + io.Writer +} + +type Config struct { + Filename string + Rotation string + Backup string + Archive string + + timeFormat string +} + +type loggerWriter struct { + filename string + rotator *rotator + archiver *archiver + maxSizeByte int64 + file *os.File +} + +func New(cfg Config) LoggerWriter { + lw := &loggerWriter{filename: cfg.Filename} + + if err := lw.openFile(); err != nil { + panic(fmt.Sprintf("Open log file failed, error: %v", err)) + } + + cfg.Filename = lw.filename + lw.rotator = newRotator(cfg) + + if lw.rotator != nil { + lw.rotator.file = lw.file + cfg.timeFormat = lw.rotator.timeFormat + lw.maxSizeByte = lw.rotator.maxSizeByte + } + + if lw.maxSizeByte == 0 { + lw.maxSizeByte = defaultMaxSizeByte + } + + lw.archiver = newArchiver(cfg) + return lw +} + +func (w *loggerWriter) Write(p []byte) (n int, err error) { + writeLen := int64(len(p)) + + if w.maxSizeByte != 0 && writeLen > w.maxSizeByte { + return 0, fmt.Errorf( + "write length %d exceeds maximum file size %d", writeLen, w.maxSizeByte, + ) + } + + if w.rotator != nil { + n, err = w.rotator.rotateWrite(p) + w.file = w.rotator.file + } else if w.file != nil { + n, err = w.file.Write(p) + } + + if w.archiver != nil { + w.archiver.archive() + } + + return 0, nil +} + +func (w *loggerWriter) openFile() error { + if w.filename == "" { + w.filename = defaultFilename + } + + file, err := openFile(w.filename) + if err != nil { + return fmt.Errorf("can't open logfile: %s", err) + } + + w.file = file + return nil +} + +func openFile(filename string) (*os.File, error) { + dir := filepath.Dir(filename) + + if _, err := os.Stat(dir); err != nil { + if !os.IsNotExist(err) { + return nil, err + } + if err = os.MkdirAll(dir, os.ModePerm); err != nil { + return nil, err + } + } + + return os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_APPEND, os.ModePerm) +} + +func prefixAndExt(filename string) (prefix, ext string) { + name := filepath.Base(filename) + ext = 
filepath.Ext(name) + prefix = name[:len(name)-len(ext)] + return prefix, ext +}