feat(blacklist): support hosts-style blacklist files and improve the hot-reload mechanism

- Parse hosts-style blacklist files: lines beginning with an IP address are recognized, and every remaining field on the line is treated as a domain name (see the example file below the commit metadata)
- Support comments at the start of a line and inline comments (`//`, `#`, and `;`)
- Manage the blacklist matcher through an atomic.Pointer so concurrent reads and reloads are safe
- Improve the blacklist hot-reload loop: replace the per-iteration time.After with a reusable time.Ticker for better stability
- Update related dependency import paths and adjust where the sync package is imported
2025-10-17 10:46:48 +08:00
parent 224f575e68
commit 1ab273e2a8
4 changed files with 52 additions and 31 deletions
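
For illustration only, a blacklist file in the newly supported hosts style could look like the following; the entries are invented for this example and are not part of the commit:

    # block ads and trackers
    0.0.0.0 ads.example.com tracker.example.net   ; every field after the IP becomes a rule
    127.0.0.1 metrics.example.org
    // plain domain lines are still accepted
    malware.example.com

With the parser changes below, whole-line comments starting with //, #, or ; are skipped, inline comments are stripped, a leading IP field is ignored, and each remaining field is normalized through canonicalFQDN before being appended to the rule list.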


@@ -4,9 +4,11 @@ import (
"bufio"
"context"
"log"
"net"
"os"
"sort"
"strings"
"sync/atomic"
"time"
"github.com/miekg/dns"
@@ -54,11 +56,33 @@ func loadBlacklistFile(path string) ([]string, error) {
if line == "" {
continue
}
// 行首注释
if strings.HasPrefix(line, "//") || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") {
continue
}
// 行内注释:先 // 再 # ;
if i := strings.Index(line, "//"); i >= 0 {
line = strings.TrimSpace(line[:i])
}
if i := strings.IndexAny(line, "#;"); i >= 0 {
line = strings.TrimSpace(line[:i])
}
if r := canonicalFQDN(line); r != "" {
rules = append(rules, r)
if line == "" {
continue
}
// hosts 风格:第一个字段是 IP则其余每个字段视为域名
fields := strings.Fields(line)
if len(fields) == 0 {
continue
}
start := 0
if net.ParseIP(fields[0]) != nil {
start = 1
}
for _, tok := range fields[start:] {
if r := canonicalFQDN(tok); r != "" {
rules = append(rules, r)
}
}
}
if err := sc.Err(); err != nil {
@@ -68,17 +92,19 @@ func loadBlacklistFile(path string) ([]string, error) {
 }

 // automatically reload the blacklist
-func startBlacklistReloader(ctx context.Context, path string, interval time.Duration, current **suffixMatcher) {
+func startBlacklistReloader(ctx context.Context, path string, interval time.Duration, holder *atomic.Pointer[suffixMatcher]) {
     if path == "" {
         return
     }
     go func() {
         var lastMod time.Time
+        ticker := time.NewTicker(interval)
+        defer ticker.Stop()
         for {
             select {
             case <-ctx.Done():
                 return
-            case <-time.After(interval):
+            case <-ticker.C:
                 fi, err := os.Stat(path)
                 if err != nil {
                     log.Printf("[blacklist] reload check failed: %v", err)
@@ -91,7 +117,7 @@ func startBlacklistReloader(ctx context.Context, path string, interval time.Dura
log.Printf("[blacklist] reload failed: %v", err)
continue
}
*current = newSuffixMatcher(rules)
holder.Store(newSuffixMatcher(rules))
lastMod = modTime
log.Printf("[blacklist] reloaded %d rules (modified %s)", len(rules), modTime.Format(time.RFC3339))
}
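
Replacing the `**suffixMatcher` out-parameter with `atomic.Pointer[suffixMatcher]` lets the reloader goroutine publish a rebuilt matcher while query handlers keep reading it concurrently, which a plain pointer write like the old `*current = newSuffixMatcher(rules)` could not guarantee under the Go memory model. The handler code is not part of this diff, so the standalone sketch below only demonstrates the Store/Load pattern with a stand-in matcher type rather than the repository's suffixMatcher:

    // Standalone sketch of the publish/snapshot pattern adopted by this commit.
    // "matcher" is a stand-in type for illustration, not the repo's suffixMatcher.
    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type matcher struct{ rules []string }

    func main() {
        var holder atomic.Pointer[matcher]
        holder.Store(&matcher{rules: []string{"ads.example.com."}})

        // Reloader side: atomically publish a freshly built matcher.
        holder.Store(&matcher{rules: []string{"ads.example.com.", "tracker.example.net."}})

        // Query side: take a consistent snapshot without locking.
        m := holder.Load()
        fmt.Printf("matching against %d rules\n", len(m.rules))
    }

Because initBlacklist (further down in this diff) always stores an initial matcher before returning the holder, Load never returns nil on the query path.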
@@ -155,9 +181,8 @@ func makeBlockedUpstream(rcode int, rule string) *dns.Msg {
     return m
 }

-func initBlacklist(ctx context.Context, listStr, filePath, rcodeStr string) (*suffixMatcher, int) {
+func initBlacklist(ctx context.Context, listStr, filePath, rcodeStr string) (*atomic.Pointer[suffixMatcher], int) {
     var rules []string
     if v := strings.TrimSpace(listStr); v != "" {
         for _, s := range strings.Split(v, ",") {
             if r := canonicalFQDN(s); r != "" {
@@ -165,24 +190,19 @@ func initBlacklist(ctx context.Context, listStr, filePath, rcodeStr string) (*su
             }
         }
     }
-    if file := strings.TrimSpace(filePath); file != "" {
-        fileRules, err := loadBlacklistFile(file)
-        if err != nil {
-            log.Fatalf("[fatal] failed to load blacklist-file %q: %v", file, err)
-        }
-        rules = append(rules, fileRules...)
-    }
-    bl := newSuffixMatcher(rules)
-    blRcode := parseRcode(rcodeStr)
     if filePath != "" {
-        startBlacklistReloader(ctx, filePath, 30*time.Second, &bl)
+        if fs, err := loadBlacklistFile(filePath); err != nil {
+            log.Printf("[blacklist] load file error: %v", err)
+        } else {
+            rules = append(rules, fs...)
+        }
     }
-    log.Printf("[blacklist] loaded %d rules (file=%v, rcode=%s)",
-        len(bl.rules), filePath != "", strings.ToUpper(rcodeStr))
-    return bl, blRcode
+    var holder atomic.Pointer[suffixMatcher]
+    holder.Store(newSuffixMatcher(rules))
+    blRcode := parseRcode(rcodeStr)
+    if filePath != "" {
+        startBlacklistReloader(ctx, filePath, 30*time.Second, &holder)
+    }
+    log.Printf("[blacklist] loaded %d rules (file=%v, rcode=%s)", len(holder.Load().rules), filePath != "", strings.ToUpper(rcodeStr))
+    return &holder, blRcode
 }
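
As a closing note on the reloader change: time.After allocates a new timer on every loop iteration, whereas a single time.Ticker is created once and released via defer Stop when the goroutine exits. The minimal, self-contained sketch below isolates that polling pattern together with context cancellation; the poll helper and the interval values are invented for illustration and are not code from the repository:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // poll runs check on a fixed interval until ctx is cancelled,
    // mirroring the ticker-plus-select loop used by startBlacklistReloader.
    func poll(ctx context.Context, interval time.Duration, check func()) {
        go func() {
            ticker := time.NewTicker(interval)
            defer ticker.Stop()
            for {
                select {
                case <-ctx.Done():
                    return
                case <-ticker.C:
                    check()
                }
            }
        }()
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
        defer cancel()
        poll(ctx, 100*time.Millisecond, func() { fmt.Println("checking blacklist file mtime...") })
        <-ctx.Done()
    }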