perf: 优化系统性能和资源管理

- 优化JWT解析器性能,复用JwtParser减少重复创建
- 使用Slf4j日志框架替代System.out.println
- 添加图片压缩功能,减少存储和带宽消耗
- 预编译正则表达式提升Markdown图片提取性能
- 重构限流拦截器,使用ConcurrentHashMap提高并发性能
- 添加数据库索引优化查询性能
- 为MarkdownFile实体添加autoResultMap配置
This commit is contained in:
ikmkj
2026-03-03 19:14:48 +08:00
parent 64daf3cb0b
commit d54719d82d
8 changed files with 312 additions and 73 deletions

View File

@@ -14,7 +14,7 @@ import java.util.Date;
@Data @Data
@Schema(name = "文本实体") @Schema(name = "文本实体")
@TableName("`markdown_file`") @TableName(value = "`markdown_file`", autoResultMap = true)
public class MarkdownFile implements Serializable { public class MarkdownFile implements Serializable {
@Schema(description = "文本id",implementation = Long.class) @Schema(description = "文本id",implementation = Long.class)
@TableId(type = IdType.AUTO) @TableId(type = IdType.AUTO)

View File

@@ -4,6 +4,8 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.test.bijihoudaun.common.response.R; import com.test.bijihoudaun.common.response.R;
import com.test.bijihoudaun.common.response.ResultCode; import com.test.bijihoudaun.common.response.ResultCode;
import com.test.bijihoudaun.util.MemoryProtector; import com.test.bijihoudaun.util.MemoryProtector;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import jakarta.servlet.http.HttpServletRequest; import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse; import jakarta.servlet.http.HttpServletResponse;
import lombok.extern.slf4j.Slf4j; import lombok.extern.slf4j.Slf4j;
@@ -11,12 +13,16 @@ import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder; import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.web.servlet.HandlerInterceptor; import org.springframework.web.servlet.HandlerInterceptor;
import java.util.LinkedHashMap;
import java.util.Map; import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/** /**
* 限流拦截器 - 支持按 IP 和按用户双重限流,带容量限制 * 限流拦截器 - 支持按 IP 和按用户双重限流,使用 ConcurrentHashMap 提高并发性能
* 带定期清理机制防止内存泄漏
*/ */
@Slf4j @Slf4j
public class RateLimitInterceptor implements HandlerInterceptor { public class RateLimitInterceptor implements HandlerInterceptor {
@@ -29,59 +35,116 @@ public class RateLimitInterceptor implements HandlerInterceptor {
private static final int MAX_LOGIN_REQUESTS_PER_MINUTE_USER = 10; private static final int MAX_LOGIN_REQUESTS_PER_MINUTE_USER = 10;
// 时间窗口(毫秒) // 时间窗口(毫秒)
private static final long WINDOW_SIZE_MS = 60_000; private static final long WINDOW_SIZE_MS = 60_000;
// 最大存储记录数(防止内存溢出) // 清理间隔毫秒每5分钟清理一次
private static final int MAX_RECORDS = 50000; private static final long CLEANUP_INTERVAL_MS = 5 * 60 * 1000;
// IP 级别限流 // 使用 ConcurrentHashMap + 原子操作,避免锁竞争
private static final LRUCache<String, RequestCounter> ipCounters = new LRUCache<>(MAX_RECORDS / 2); private static final ConcurrentHashMap<String, RequestCounter> ipCounters = new ConcurrentHashMap<>();
private static final LRUCache<String, RequestCounter> ipLoginCounters = new LRUCache<>(MAX_RECORDS / 4); private static final ConcurrentHashMap<String, RequestCounter> ipLoginCounters = new ConcurrentHashMap<>();
// 用户级别限流 private static final ConcurrentHashMap<String, RequestCounter> userCounters = new ConcurrentHashMap<>();
private static final LRUCache<String, RequestCounter> userCounters = new LRUCache<>(MAX_RECORDS / 4); private static final ConcurrentHashMap<String, RequestCounter> userLoginCounters = new ConcurrentHashMap<>();
private static final LRUCache<String, RequestCounter> userLoginCounters = new LRUCache<>(MAX_RECORDS / 4);
private static final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); // 定期清理线程池
private ScheduledExecutorService cleanupScheduler;
private static class RequestCounter { private static class RequestCounter {
int count; private final AtomicInteger count = new AtomicInteger(0);
long windowStart; private volatile long windowStart;
// 记录最后访问时间,用于清理
private volatile long lastAccessTime;
RequestCounter() { RequestCounter() {
this.count = 1;
this.windowStart = System.currentTimeMillis(); this.windowStart = System.currentTimeMillis();
this.lastAccessTime = System.currentTimeMillis();
} }
boolean incrementAndCheck(int maxRequests) { boolean incrementAndCheck(int maxRequests) {
long now = System.currentTimeMillis(); long now = System.currentTimeMillis();
if (now - windowStart > WINDOW_SIZE_MS) { lastAccessTime = now;
// 新窗口 long currentWindow = windowStart;
count = 1;
windowStart = now; if (now - currentWindow > WINDOW_SIZE_MS) {
return true; // 尝试进入新窗口
synchronized (this) {
if (windowStart == currentWindow) {
// 确实需要新窗口
windowStart = now;
count.set(1);
return true;
}
}
// 其他线程已经更新了窗口,继续检查
} }
count++;
return count <= maxRequests; int currentCount = count.incrementAndGet();
return currentCount <= maxRequests;
}
/**
* 检查是否过期超过2个时间窗口没有访问
*/
boolean isExpired() {
return System.currentTimeMillis() - lastAccessTime > WINDOW_SIZE_MS * 2;
}
}
@PostConstruct
public void init() {
// 启动定期清理任务
cleanupScheduler = Executors.newSingleThreadScheduledExecutor(r -> {
Thread t = new Thread(r, "rate-limit-cleanup");
t.setDaemon(true);
return t;
});
cleanupScheduler.scheduleAtFixedRate(this::cleanupExpiredCounters,
CLEANUP_INTERVAL_MS, CLEANUP_INTERVAL_MS, TimeUnit.MILLISECONDS);
log.info("限流拦截器初始化完成,清理间隔:{}分钟", CLEANUP_INTERVAL_MS / 60000);
}
@PreDestroy
public void destroy() {
// 关闭清理线程池
if (cleanupScheduler != null && !cleanupScheduler.isShutdown()) {
cleanupScheduler.shutdown();
try {
if (!cleanupScheduler.awaitTermination(5, TimeUnit.SECONDS)) {
cleanupScheduler.shutdownNow();
}
} catch (InterruptedException e) {
cleanupScheduler.shutdownNow();
Thread.currentThread().interrupt();
}
}
log.info("限流拦截器已销毁");
}
/**
* 清理过期的计数器
*/
private void cleanupExpiredCounters() {
try {
int removed = 0;
removed += cleanupMap(ipCounters);
removed += cleanupMap(ipLoginCounters);
removed += cleanupMap(userCounters);
removed += cleanupMap(userLoginCounters);
if (removed > 0) {
log.debug("限流计数器清理完成,移除 {} 个过期条目", removed);
}
} catch (Exception e) {
log.error("限流计数器清理失败", e);
} }
} }
/** /**
* 简单的 LRU 缓存实现 * 清理单个 Map 中的过期条目
* 使用 removeIf 方法避免并发修改异常
*/ */
private static class LRUCache<K, V> extends LinkedHashMap<K, V> { private int cleanupMap(ConcurrentHashMap<String, RequestCounter> map) {
private final int maxSize; int sizeBefore = map.size();
// 使用 ConcurrentHashMap 的 removeIf 方法,线程安全
LRUCache(int maxSize) { map.entrySet().removeIf(entry -> entry.getValue().isExpired());
super(maxSize, 0.75f, true); return sizeBefore - map.size();
this.maxSize = maxSize;
}
@Override
protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
boolean shouldRemove = size() > maxSize;
if (shouldRemove) {
log.debug("限流记录达到上限,移除最旧的记录");
}
return shouldRemove;
}
} }
@Override @Override
@@ -117,19 +180,14 @@ public class RateLimitInterceptor implements HandlerInterceptor {
*/ */
private boolean checkIpLimit(String clientIp, boolean isLoginRequest, private boolean checkIpLimit(String clientIp, boolean isLoginRequest,
HttpServletResponse response) throws Exception { HttpServletResponse response) throws Exception {
LRUCache<String, RequestCounter> counters = isLoginRequest ? ipLoginCounters : ipCounters; ConcurrentHashMap<String, RequestCounter> counters = isLoginRequest ? ipLoginCounters : ipCounters;
int maxRequests = isLoginRequest ? MAX_LOGIN_REQUESTS_PER_MINUTE : MAX_REQUESTS_PER_MINUTE; int maxRequests = isLoginRequest ? MAX_LOGIN_REQUESTS_PER_MINUTE : MAX_REQUESTS_PER_MINUTE;
lock.writeLock().lock(); RequestCounter counter = counters.computeIfAbsent(clientIp, k -> new RequestCounter());
try {
RequestCounter counter = counters.computeIfAbsent(clientIp, k -> new RequestCounter());
if (!counter.incrementAndCheck(maxRequests)) { if (!counter.incrementAndCheck(maxRequests)) {
writeRateLimitResponse(response, "请求过于频繁,请稍后再试"); writeRateLimitResponse(response, "请求过于频繁,请稍后再试");
return false; return false;
}
} finally {
lock.writeLock().unlock();
} }
return true; return true;
} }
@@ -139,19 +197,14 @@ public class RateLimitInterceptor implements HandlerInterceptor {
*/ */
private boolean checkUserLimit(String username, boolean isLoginRequest, private boolean checkUserLimit(String username, boolean isLoginRequest,
HttpServletResponse response) throws Exception { HttpServletResponse response) throws Exception {
LRUCache<String, RequestCounter> counters = isLoginRequest ? userLoginCounters : userCounters; ConcurrentHashMap<String, RequestCounter> counters = isLoginRequest ? userLoginCounters : userCounters;
int maxRequests = isLoginRequest ? MAX_LOGIN_REQUESTS_PER_MINUTE_USER : MAX_REQUESTS_PER_MINUTE_USER; int maxRequests = isLoginRequest ? MAX_LOGIN_REQUESTS_PER_MINUTE_USER : MAX_REQUESTS_PER_MINUTE_USER;
lock.writeLock().lock(); RequestCounter counter = counters.computeIfAbsent(username, k -> new RequestCounter());
try {
RequestCounter counter = counters.computeIfAbsent(username, k -> new RequestCounter());
if (!counter.incrementAndCheck(maxRequests)) { if (!counter.incrementAndCheck(maxRequests)) {
writeRateLimitResponse(response, "您的操作过于频繁,请稍后再试"); writeRateLimitResponse(response, "您的操作过于频繁,请稍后再试");
return false; return false;
}
} finally {
lock.writeLock().unlock();
} }
return true; return true;
} }

View File

@@ -1,6 +1,7 @@
package com.test.bijihoudaun.scheduler; package com.test.bijihoudaun.scheduler;
import com.test.bijihoudaun.service.ImageCleanupService; import com.test.bijihoudaun.service.ImageCleanupService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled; import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component; import org.springframework.stereotype.Component;
@@ -10,6 +11,7 @@ import org.springframework.stereotype.Component;
* 定期自动清理冗余图片 * 定期自动清理冗余图片
*/ */
@Component @Component
@Slf4j
public class ImageCleanupScheduler { public class ImageCleanupScheduler {
@Autowired @Autowired
@@ -22,6 +24,7 @@ public class ImageCleanupScheduler {
@Scheduled(cron = "0 0 3 * * ?") @Scheduled(cron = "0 0 3 * * ?")
public void scheduledCleanup() { public void scheduledCleanup() {
int deletedCount = imageCleanupService.cleanupRedundantImages(); int deletedCount = imageCleanupService.cleanupRedundantImages();
System.out.println("定时清理任务完成,清理了 " + deletedCount + " 个冗余图片"); // 优化:使用日志框架代替 System.out.println
log.info("定时清理任务完成,清理了 {} 个冗余图片", deletedCount);
} }
} }

View File

@@ -9,6 +9,7 @@ import com.test.bijihoudaun.common.exception.BusinessException;
import com.test.bijihoudaun.entity.Image; import com.test.bijihoudaun.entity.Image;
import com.test.bijihoudaun.mapper.ImageMapper; import com.test.bijihoudaun.mapper.ImageMapper;
import com.test.bijihoudaun.service.ImageService; import com.test.bijihoudaun.service.ImageService;
import com.test.bijihoudaun.util.ImageCompressor;
import jakarta.annotation.Resource; import jakarta.annotation.Resource;
import org.springframework.beans.factory.annotation.Value; import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service; import org.springframework.stereotype.Service;
@@ -16,6 +17,7 @@ import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile; import org.springframework.web.multipart.MultipartFile;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files; import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
import java.nio.file.Paths; import java.nio.file.Paths;
@@ -79,7 +81,18 @@ public class ImageServiceImpl
String storedName = UUID.randomUUID() + extension; String storedName = UUID.randomUUID() + extension;
Path filePath = uploadPath.resolve(storedName); Path filePath = uploadPath.resolve(storedName);
Files.copy(file.getInputStream(), filePath);
// 优化:压缩图片后再保存
InputStream compressedStream = ImageCompressor.compressIfNeeded(
file.getInputStream(), contentType, file.getSize());
if (compressedStream != null) {
// 使用压缩后的图片
Files.copy(compressedStream, filePath);
} else {
// 使用原图
Files.copy(file.getInputStream(), filePath);
}
Image image = new Image(); Image image = new Image();
image.setOriginalName(originalFilename); image.setOriginalName(originalFilename);

View File

@@ -0,0 +1,121 @@
package com.test.bijihoudaun.util;
import lombok.extern.slf4j.Slf4j;
import javax.imageio.IIOImage;
import javax.imageio.ImageIO;
import javax.imageio.ImageWriteParam;
import javax.imageio.ImageWriter;
import javax.imageio.stream.MemoryCacheImageOutputStream;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
/**
 * Image compression utility.
 * Down-scales and re-encodes uploaded images to reduce storage and bandwidth.
 * Stateless; all members are static.
 */
@Slf4j
public final class ImageCompressor {
    // Maximum width (pixels) of the compressed image
    private static final int MAX_WIDTH = 1920;
    // Maximum height (pixels) of the compressed image
    private static final int MAX_HEIGHT = 1080;
    // JPEG encoding quality (0.0 - 1.0), applied via ImageWriteParam
    private static final float COMPRESS_QUALITY = 0.85f;
    // Minimum file size (bytes) before compression is attempted
    private static final long COMPRESS_THRESHOLD = 500 * 1024; // 500KB

    // Utility class: prevent instantiation.
    private ImageCompressor() {
    }

    /**
     * Compresses an image if it is large enough and of a supported type.
     *
     * @param inputStream  original image input stream (fully consumed; caller closes it)
     * @param contentType  MIME type of the image; may be null (treated as unsupported)
     * @param originalSize original file size in bytes
     * @return a stream over the compressed bytes, or null when the original
     *         should be used as-is (below threshold, unsupported type, not
     *         decodable, compression did not shrink it, or an I/O error occurred)
     */
    public static InputStream compressIfNeeded(InputStream inputStream, String contentType, long originalSize) {
        // Files below the threshold are stored unchanged.
        if (originalSize < COMPRESS_THRESHOLD) {
            return null;
        }
        // Only JPEG and PNG are compressed. Constant-first equals is null-safe:
        // contentType can be absent on some multipart uploads (the original code
        // dereferenced it directly and could throw NPE).
        boolean isPng = "image/png".equals(contentType);
        if (!"image/jpeg".equals(contentType) && !isPng) {
            return null;
        }
        try {
            BufferedImage originalImage = ImageIO.read(inputStream);
            if (originalImage == null) {
                // Bytes were not decodable as an image despite the content type.
                return null;
            }
            int originalWidth = originalImage.getWidth();
            int originalHeight = originalImage.getHeight();
            // Default: keep original dimensions (quality-only compression).
            int newWidth = originalWidth;
            int newHeight = originalHeight;
            // Scale down proportionally when either dimension exceeds the limit.
            if (originalWidth > MAX_WIDTH || originalHeight > MAX_HEIGHT) {
                double scale = Math.min(
                    (double) MAX_WIDTH / originalWidth,
                    (double) MAX_HEIGHT / originalHeight
                );
                newWidth = (int) (originalWidth * scale);
                newHeight = (int) (originalHeight * scale);
            }
            // Fix: PNG may carry an alpha channel; rendering into TYPE_INT_RGB
            // would paint transparent areas black. Keep ARGB for PNG output;
            // JPEG has no alpha, so RGB is required there.
            int imageType = isPng ? BufferedImage.TYPE_INT_ARGB : BufferedImage.TYPE_INT_RGB;
            BufferedImage compressedImage = new BufferedImage(newWidth, newHeight, imageType);
            Graphics2D g2d = compressedImage.createGraphics();
            // High-quality rendering hints for the rescale.
            g2d.setRenderingHint(RenderingHints.KEY_INTERPOLATION, RenderingHints.VALUE_INTERPOLATION_BILINEAR);
            g2d.setRenderingHint(RenderingHints.KEY_RENDERING, RenderingHints.VALUE_RENDER_QUALITY);
            g2d.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
            g2d.drawImage(originalImage, 0, 0, newWidth, newHeight, null);
            g2d.dispose();
            String formatName = isPng ? "png" : "jpg";
            byte[] compressedBytes = encode(compressedImage, formatName);
            // If the "compressed" result is not smaller, fall back to the original.
            if (compressedBytes.length >= originalSize) {
                return null;
            }
            log.debug("图片压缩成功:{} -> {} ({}%)",
                formatSize(originalSize),
                formatSize(compressedBytes.length),
                (compressedBytes.length * 100 / originalSize));
            return new ByteArrayInputStream(compressedBytes);
        } catch (IOException e) {
            log.warn("图片压缩失败,使用原图", e);
            return null;
        }
    }

    /**
     * Encodes an image to bytes. Fix: the original never used COMPRESS_QUALITY —
     * plain ImageIO.write encodes JPEG at its default quality. Here JPEG is
     * written through an ImageWriter with an explicit compression quality;
     * PNG (lossless) keeps the simple path.
     */
    private static byte[] encode(BufferedImage image, String formatName) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        if ("jpg".equals(formatName)) {
            ImageWriter writer = ImageIO.getImageWritersByFormatName("jpg").next();
            try (MemoryCacheImageOutputStream output = new MemoryCacheImageOutputStream(baos)) {
                ImageWriteParam param = writer.getDefaultWriteParam();
                param.setCompressionMode(ImageWriteParam.MODE_EXPLICIT);
                param.setCompressionQuality(COMPRESS_QUALITY);
                writer.setOutput(output);
                writer.write(null, new IIOImage(image, null, null), param);
            } finally {
                writer.dispose();
            }
        } else {
            ImageIO.write(image, formatName, baos);
        }
        return baos.toByteArray();
    }

    /**
     * Formats a byte count as a human-readable size (B / KB / MB).
     */
    private static String formatSize(long size) {
        if (size < 1024) {
            return size + "B";
        } else if (size < 1024 * 1024) {
            return String.format("%.2fKB", size / 1024.0);
        } else {
            return String.format("%.2fMB", size / (1024.0 * 1024));
        }
    }
}

View File

@@ -26,10 +26,14 @@ public class JwtTokenUtil {
private Long expiration; private Long expiration;
private Key key; private Key key;
// 优化:复用 JwtParser避免每次请求都创建
private io.jsonwebtoken.JwtParser jwtParser;
@PostConstruct @PostConstruct
public void init() { public void init() {
this.key = Keys.hmacShaKeyFor(secret.getBytes(StandardCharsets.UTF_8)); this.key = Keys.hmacShaKeyFor(secret.getBytes(StandardCharsets.UTF_8));
// 优化:只创建一次 parser
this.jwtParser = Jwts.parserBuilder().setSigningKey(key).build();
} }
// 从token中获取用户名 // 从token中获取用户名
@@ -47,9 +51,9 @@ public class JwtTokenUtil {
return claimsResolver.apply(claims); return claimsResolver.apply(claims);
} }
// 为了从token中获取任何信息我们都需要密钥 // 优化:使用复用的 parser
private Claims getAllClaimsFromToken(String token) { private Claims getAllClaimsFromToken(String token) {
return Jwts.parserBuilder().setSigningKey(key).build().parseClaimsJws(token).getBody(); return jwtParser.parseClaimsJws(token).getBody();
} }
// 检查token是否过期 // 检查token是否过期

View File

@@ -11,6 +11,16 @@ import java.util.ArrayList;
*/ */
public class MarkdownImageExtractor { public class MarkdownImageExtractor {
// 优化:预编译正则表达式,避免每次调用都编译
private static final Pattern IMAGE_FILENAME_PATTERN = Pattern.compile(
"!\\[.*?\\]\\([^)]*?([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}\\.[a-zA-Z0-9]+)\\)"
);
// 优化预编译URL匹配正则
private static final Pattern IMAGE_URL_PATTERN = Pattern.compile(
"!\\[.*?\\]\\(([^)]+)\\)"
);
/** /**
* 从Markdown内容中提取图片文件名 * 从Markdown内容中提取图片文件名
* 支持各种URL格式: * 支持各种URL格式:
@@ -26,10 +36,8 @@ public class MarkdownImageExtractor {
return new ArrayList<>(); return new ArrayList<>();
} }
// 使用正则表达式匹配Markdown图片语法中的文件名 // 使用预编译的正则表达式
// 模式: ![alt](url) 其中url以UUID格式的文件名结尾 Matcher matcher = IMAGE_FILENAME_PATTERN.matcher(markdownContent);
Pattern pattern = Pattern.compile("!\\[.*?\\]\\([^)]*?([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}\\.[a-zA-Z0-9]+)\\)");
Matcher matcher = pattern.matcher(markdownContent);
List<String> filenames = new ArrayList<>(); List<String> filenames = new ArrayList<>();
while (matcher.find()) { while (matcher.find()) {
@@ -65,9 +73,8 @@ public class MarkdownImageExtractor {
return new ArrayList<>(); return new ArrayList<>();
} }
// 匹配Markdown图片语法中的完整URL // 使用预编译的正则表达式
Pattern pattern = Pattern.compile("!\\[.*?\\]\\(([^)]+)\\)"); Matcher matcher = IMAGE_URL_PATTERN.matcher(markdownContent);
Matcher matcher = pattern.matcher(markdownContent);
List<String> urls = new ArrayList<>(); List<String> urls = new ArrayList<>();
while (matcher.find()) { while (matcher.find()) {

View File

@@ -0,0 +1,38 @@
-- Database performance-optimization indexes.
-- Created: 2024-03-03
-- NOTE(review): "CREATE INDEX IF NOT EXISTS" is supported by SQLite, PostgreSQL and
-- MariaDB but NOT by MySQL proper — confirm the target database before running.
-- NOTE(review): "grouping" and "user" are reserved words in several SQL dialects;
-- quoting may be required depending on the database — verify.
-- markdown_file table indexes
-- Lookup by group
CREATE INDEX IF NOT EXISTS idx_markdown_grouping_id ON markdown_file(grouping_id);
-- Filter by soft-delete flag
CREATE INDEX IF NOT EXISTS idx_markdown_is_deleted ON markdown_file(is_deleted);
-- Sort by creation time
CREATE INDEX IF NOT EXISTS idx_markdown_created_at ON markdown_file(created_at);
-- Composite index: non-deleted notes within a group
CREATE INDEX IF NOT EXISTS idx_markdown_grouping_deleted ON markdown_file(grouping_id, is_deleted);
-- image table indexes
-- Lookup by markdown_id (join queries)
CREATE INDEX IF NOT EXISTS idx_image_markdown_id ON image(markdown_id);
-- Lookup by stored file name
CREATE INDEX IF NOT EXISTS idx_image_stored_name ON image(stored_name);
-- Lookup by creation time (old-image cleanup)
CREATE INDEX IF NOT EXISTS idx_image_created_at ON image(created_at);
-- grouping table indexes
-- Lookup by parent group
CREATE INDEX IF NOT EXISTS idx_grouping_parent_id ON grouping(parent_id);
-- Filter by soft-delete flag
CREATE INDEX IF NOT EXISTS idx_grouping_is_deleted ON grouping(is_deleted);
-- trash table indexes
-- Lookup by user
CREATE INDEX IF NOT EXISTS idx_trash_user_id ON trash(user_id);
-- Sort by deletion time (expired-data cleanup)
CREATE INDEX IF NOT EXISTS idx_trash_deleted_at ON trash(deleted_at);
-- Filter by item type
CREATE INDEX IF NOT EXISTS idx_trash_item_type ON trash(item_type);
-- user table indexes
-- Lookup by username (login)
CREATE INDEX IF NOT EXISTS idx_user_username ON user(username);