feat(system/file): 新增多文件分片上传功能,支持本地存储和S3存储

Co-authored-by: kiki1373639299<zkai0106@163.com>



# message auto-generated for no-merge-commit merge:
merge upload into dev

feat(system/file):  新增多文件分片上传功能,支持本地存储和S3存储

Created-by: kiki1373639299
Commit-by: kiki1373639299
Merged-by: Charles_7c
Description: <!--
  非常感谢您的 PR!在提交之前,请务必确保您 PR 的代码经过了完整测试,并且通过了代码规范检查。
-->

<!-- 在 [ ] 中输入 x 来勾选 -->

## PR 类型

<!-- 您的 PR 引入了哪种类型的变更? -->
<!-- 只支持选择一种类型,如果有多种类型,可以在更新日志中增加 “类型” 列。 -->

- [X] 新 feature
- [ ] Bug 修复
- [ ] 功能增强
- [ ] 文档变更
- [ ] 代码样式变更
- [ ] 重构
- [ ] 性能改进
- [ ] 单元测试
- [ ] CI/CD
- [ ] 其他

## PR 目的

<!-- 描述一下您的 PR 解决了什么问题。如果可以,请链接到相关 issues。 -->

## 解决方案

<!-- 详细描述您是如何解决的问题 -->

## PR 测试

<!-- 如果可以,请为您的 PR 添加或更新单元测试。 -->
<!-- 请描述一下您是如何测试 PR 的。例如:创建/更新单元测试或添加相关的截图。 -->

## Changelog

| 模块  | Changelog | Related issues |
|-----|-----------| -------------- |
|     |           |                |

<!-- 如果有多种类型的变更,可以在变更日志表中增加 “类型” 列,该列的值与上方 “PR 类型” 相同。 -->
<!-- Related issues 格式为 Closes #<issue号>,或者 Fixes #<issue号>,或者 Resolves #<issue号>。 -->

## 其他信息

<!-- 请描述一下还有哪些注意事项。例如:如果引入了一个不向下兼容的变更,请描述其影响。 -->

## 提交前确认

- [X] PR 代码经过了完整测试,并且通过了代码规范检查
- [ ] 已经完整填写 Changelog,并链接到了相关 issues
- [X] PR 代码将要提交到 dev 分支

See merge request: continew/continew-admin!11
This commit is contained in:
kiki1373639299
2025-08-12 17:56:30 +08:00
committed by Charles_7c
parent 21b753e5eb
commit af0f58a096
19 changed files with 1896 additions and 2 deletions

View File

@@ -31,12 +31,18 @@
<groupId>org.dromara.x-file-storage</groupId>
<artifactId>x-file-storage-spring</artifactId>
</dependency>
<!-- Amazon S3Amazon Simple Storage Service亚马逊简单存储服务通用存储协议 S3兼容主流云厂商对象存储 -->
<!-- Amazon S3Amazon Simple Storage Service亚马逊简单存储服务通用存储协议 S3兼容主流云厂商对象存储后续会移除替换1.x的版本 -->
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-s3</artifactId>
</dependency>
<!-- Amazon S3 2.x version Amazon Simple Storage Service亚马逊简单存储服务通用存储协议 S3兼容主流云厂商对象存储 -->
<dependency>
<groupId>software.amazon.awssdk</groupId>
<artifactId>s3</artifactId>
</dependency>
<!-- FreeMarker模板引擎 -->
<dependency>
<groupId>org.freemarker</groupId>

View File

@@ -0,0 +1,98 @@
/*
* Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
* <p>
* Licensed under the GNU LESSER GENERAL PUBLIC LICENSE 3.0;
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.gnu.org/licenses/lgpl.html
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package top.continew.admin.system.constant;
/**
 * Multipart upload constants.
 *
 * <p>Cache key prefixes and tuning values used by the multipart upload feature.
 * All cache keys live under the {@code multipart:} namespace.</p>
 *
 * @author KAI
 * @since 2025/7/30 17:40
 */
public class MultipartUploadConstants {

    // TODO: read these values from the application configuration instead of hard-coding them

    /**
     * Prefix for the MD5 -> uploadId mapping.
     * <p>
     * Stores the mapping from a file's MD5 to its uploadId (a two-column map keyed by MD5).
     * Key format: multipart:md5_to_upload:{md5}
     * Value format: hash structure containing uploadId and fileInfo
     * </p>
     */
    public static final String MD5_TO_UPLOAD_ID_PREFIX = "multipart:md5_to_upload:";

    /**
     * Prefix for multipart upload initialization info.
     * <p>
     * Stores the initialization data of a multipart upload (uploadId, bucket, path, ...).
     * Key format: multipart:upload:{uploadId}
     * Value format: JSON string, the serialized MultipartInitResp
     * </p>
     */
    public static final String MULTIPART_UPLOAD_PREFIX = "multipart:upload:";

    /**
     * Prefix for part info.
     * <p>
     * Stores the upload info of every part, using a hash structure.
     * Key format: multipart:parts:{uploadId}
     * Value format: hash where field = part number, value = JSON-serialized FilePartInfo
     * </p>
     */
    public static final String MULTIPART_PARTS_PREFIX = "multipart:parts:";

    /**
     * Prefix for upload metadata.
     * <p>
     * Stores metadata of a multipart upload such as file name, size and type.
     * Key format: multipart:metadata:{uploadId}
     * Value format: hash where field = metadata key, value = metadata value
     * </p>
     */
    public static final String MULTIPART_METADATA_PREFIX = "multipart:metadata:";

    /**
     * Prefix for expiration timestamps.
     * <p>
     * Stores the expiration time of a multipart upload, used to purge stale data periodically.
     * Key format: multipart:expire:{uploadId}
     * Value format: ISO-formatted date-time string
     * </p>
     */
    public static final String MULTIPART_EXPIRE_PREFIX = "multipart:expire:";

    /**
     * Default expiration time in hours.
     * <p>
     * Cached multipart data older than this is cleaned up automatically.
     * 24 hours balances storage usage against user experience.
     * </p>
     */
    public static final long DEFAULT_EXPIRE_HOURS = 24;

    /**
     * Temporary directory name used to stage uploaded parts for local storage.
     */
    public static final String TEMP_DIR_NAME = "temp";

    /**
     * Part size in bytes (5 MiB).
     */
    public static final long MULTIPART_UPLOAD_PART_SIZE = 5 * 1024 * 1024;

    /**
     * Constants holder — not instantiable.
     */
    private MultipartUploadConstants() {
    }
}

View File

@@ -0,0 +1,78 @@
/*
 * Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
 * <p>
 * Licensed under the GNU LESSER GENERAL PUBLIC LICENSE 3.0;
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.gnu.org/licenses/lgpl.html
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package top.continew.admin.system.controller;

import io.swagger.v3.oas.annotations.Operation;
import jakarta.validation.Valid;
import lombok.RequiredArgsConstructor;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import top.continew.admin.system.model.entity.FileDO;
import top.continew.admin.system.model.req.MultipartUploadInitReq;
import top.continew.admin.system.model.resp.file.MultipartUploadInitResp;
import top.continew.admin.system.model.resp.file.MultipartUploadResp;
import top.continew.admin.system.service.MultipartUploadService;

/**
 * Multipart upload controller.
 *
 * <p>Thin REST layer that delegates every operation to {@link MultipartUploadService}.</p>
 *
 * @author KAI
 * @since 2025/7/30 16:38
 */
@RestController
@RequestMapping("/system/multipart-upload")
@RequiredArgsConstructor
public class MultipartUploadController {

    private final MultipartUploadService multipartUploadService;

    /**
     * Initializes a multipart upload.
     *
     * @param multiPartUploadInitReq upload initialization request (validated)
     * @return initialization response containing uploadId and related info
     */
    @Operation(summary = "初始化分片上传", description = "初始化分片上传返回uploadId等信息")
    @PostMapping("/init")
    public MultipartUploadInitResp initMultipartUpload(@RequestBody @Valid MultipartUploadInitReq multiPartUploadInitReq) {
        return multipartUploadService.initMultipartUpload(multiPartUploadInitReq);
    }

    /**
     * Uploads a single part.
     *
     * @param file       part content
     * @param uploadId   upload ID returned by {@link #initMultipartUpload}
     * @param partNumber 1-based part number
     * @param path       target file path
     * @return upload result for this part
     */
    @Operation(summary = "上传分片", description = "上传单个分片")
    @PostMapping("/part")
    public MultipartUploadResp uploadPart(@RequestPart("file") MultipartFile file, @RequestParam("uploadId") String uploadId,
                                          @RequestParam("partNumber") Integer partNumber, @RequestParam("path") String path) {
        return multipartUploadService.uploadPart(file, uploadId, partNumber, path);
    }

    /**
     * Completes the upload by merging all parts.
     *
     * <p>NOTE(review): this is a state-changing operation exposed via GET; consider
     * POST in a future API version (kept as-is to avoid breaking existing clients).</p>
     *
     * @param uploadId upload ID
     * @return the persisted file entity
     */
    @Operation(summary = "完成分片上传", description = "合并所有分片,完成上传")
    @GetMapping("/complete/{uploadId}")
    public FileDO completeMultipartUpload(@PathVariable String uploadId) {
        return multipartUploadService.completeMultipartUpload(uploadId);
    }

    /**
     * Cancels a multipart upload and discards cached info and part data.
     *
     * <p>NOTE(review): state-changing operation exposed via GET; consider DELETE
     * in a future API version (kept as-is to avoid breaking existing clients).</p>
     *
     * @param uploadId upload ID
     */
    @Operation(summary = "取消分片上传", description = "删除缓存信息,分片数据")
    @GetMapping("/cancel/{uploadId}")
    public void cancelMultipartUpload(@PathVariable String uploadId) {
        multipartUploadService.cancelMultipartUpload(uploadId);
    }
}

View File

@@ -0,0 +1,108 @@
/*
 * Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
 * <p>
 * Licensed under the GNU LESSER GENERAL PUBLIC LICENSE 3.0;
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.gnu.org/licenses/lgpl.html
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package top.continew.admin.system.dao;

import top.continew.admin.system.model.resp.file.FilePartInfo;
import top.continew.admin.system.model.resp.file.MultipartUploadInitResp;

import java.util.List;
import java.util.Map;

/**
 * Multipart upload persistence interface.
 * <p>
 * Pure cache operations, no business logic:
 * 1. MD5-to-uploadId mapping management
 * 2. part info caching
 * 3. upload state caching
 * </p>
 *
 * @author KAI
 * @since 2.14.0
 */
public interface MultipartUploadDao {

    /**
     * Resolves the uploadId for a file MD5.
     *
     * @param md5 file MD5
     * @return the uploadId, or {@code null} if no mapping exists
     */
    String getUploadIdByMd5(String md5);

    /**
     * Caches the MD5 -> uploadId mapping.
     *
     * @param md5      file MD5
     * @param uploadId upload ID
     */
    void setMd5Mapping(String md5, String uploadId);

    /**
     * Deletes the MD5 mapping.
     *
     * @param md5 file MD5
     */
    void deleteMd5Mapping(String md5);

    /**
     * Caches the multipart upload info.
     *
     * @param uploadId upload ID
     * @param initResp initialization response
     * @param metadata upload metadata (may be null or empty)
     */
    void setMultipartUpload(String uploadId, MultipartUploadInitResp initResp, Map<String, String> metadata);

    /**
     * Gets the multipart upload info.
     *
     * @param uploadId upload ID
     * @return the upload info, or {@code null} if not found
     */
    MultipartUploadInitResp getMultipartUpload(String uploadId);

    /**
     * Deletes the multipart upload info (including its metadata and MD5 mapping).
     *
     * @param uploadId upload ID
     */
    void deleteMultipartUpload(String uploadId);

    /**
     * Deletes every cached record associated with an upload
     * (upload info, metadata, MD5 mapping and all part info).
     *
     * @param uploadId upload ID
     */
    void deleteMultipartUploadAll(String uploadId);

    /**
     * Caches the info of a single part.
     *
     * @param uploadId     upload ID
     * @param filePartInfo part info
     */
    void setFilePart(String uploadId, FilePartInfo filePartInfo);

    /**
     * Gets all cached part info, sorted by part number.
     *
     * @param uploadId upload ID
     * @return list of part info (empty when none)
     */
    List<FilePartInfo> getFileParts(String uploadId);

    /**
     * Deletes all cached part info.
     *
     * @param uploadId upload ID
     */
    void deleteFileParts(String uploadId);

    /**
     * Checks whether a part has already been uploaded.
     *
     * @param uploadId   upload ID
     * @param partNumber part number
     * @return {@code true} if the part exists
     */
    boolean existsFilePart(String uploadId, int partNumber);

    /**
     * Purges expired cached data.
     */
    void cleanupExpiredData();
}

View File

@@ -0,0 +1,259 @@
/*
 * Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
 * <p>
 * Licensed under the GNU LESSER GENERAL PUBLIC LICENSE 3.0;
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.gnu.org/licenses/lgpl.html
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package top.continew.admin.system.dao.impl;

import cn.hutool.core.util.StrUtil;
import cn.hutool.json.JSONUtil;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Repository;
import top.continew.admin.system.constant.MultipartUploadConstants;
import top.continew.admin.system.dao.MultipartUploadDao;
import top.continew.admin.system.model.resp.file.FilePartInfo;
import top.continew.admin.system.model.resp.file.MultipartUploadInitResp;
import top.continew.starter.cache.redisson.util.RedisUtils;

import java.time.Duration;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.stream.Collectors;

/**
 * Redis-backed multipart upload cache implementation.
 * <p>
 * Responsibilities:
 * 1. MD5-to-uploadId mapping management
 * 2. part info caching
 * 3. upload state caching
 * </p>
 * <p>NOTE(review): the class name duplicates "Dao" ("...DaoDaoImpl"); consider
 * renaming to {@code RedisMultipartUploadDaoImpl} in a follow-up change.</p>
 *
 * @author KAI
 * @since 2025/7/30 17:40
 */
@Slf4j
@Repository
public class RedisMultipartUploadDaoDaoImpl implements MultipartUploadDao {

    @Override
    public String getUploadIdByMd5(String md5) {
        String md5Key = MultipartUploadConstants.MD5_TO_UPLOAD_ID_PREFIX + md5;
        try {
            return RedisUtils.hGet(md5Key, "uploadId");
        } catch (Exception e) {
            // Lookup failures degrade to "no mapping" rather than failing the upload flow.
            log.error("根据MD5获取uploadId失败: md5={}", md5, e);
            return null;
        }
    }

    @Override
    public void setMd5Mapping(String md5, String uploadId) {
        String md5Key = MultipartUploadConstants.MD5_TO_UPLOAD_ID_PREFIX + md5;
        try {
            RedisUtils.hSet(md5Key, "uploadId", uploadId);
            RedisUtils.expire(md5Key, Duration.ofHours(MultipartUploadConstants.DEFAULT_EXPIRE_HOURS));
            log.debug("缓存MD5映射: md5={}, uploadId={}", md5, uploadId);
        } catch (Exception e) {
            log.error("缓存MD5映射失败: md5={}, uploadId={}", md5, uploadId, e);
            throw new RuntimeException("缓存MD5映射失败", e);
        }
    }

    @Override
    public void deleteMd5Mapping(String md5) {
        String md5Key = MultipartUploadConstants.MD5_TO_UPLOAD_ID_PREFIX + md5;
        try {
            RedisUtils.delete(md5Key);
            log.debug("删除MD5映射: md5={}", md5);
        } catch (Exception e) {
            // Best-effort delete: a stale mapping expires on its own via the key TTL.
            log.error("删除MD5映射失败: md5={}", md5, e);
        }
    }

    @Override
    public void setMultipartUpload(String uploadId, MultipartUploadInitResp initResp, Map<String, String> metadata) {
        String key = MultipartUploadConstants.MULTIPART_UPLOAD_PREFIX + uploadId;
        String metadataKey = MultipartUploadConstants.MULTIPART_METADATA_PREFIX + uploadId;
        try {
            // Cache the initialization info as a JSON string with a TTL.
            RedisUtils.set(key, JSONUtil.toJsonStr(initResp), Duration.ofHours(MultipartUploadConstants.DEFAULT_EXPIRE_HOURS));
            // Cache the metadata as a hash with the same TTL.
            if (metadata != null && !metadata.isEmpty()) {
                for (Map.Entry<String, String> entry : metadata.entrySet()) {
                    RedisUtils.hSet(metadataKey, entry.getKey(), entry.getValue());
                }
                RedisUtils.expire(metadataKey, Duration.ofHours(MultipartUploadConstants.DEFAULT_EXPIRE_HOURS));
            }
            log.debug("缓存分片上传信息: uploadId={}", uploadId);
        } catch (Exception e) {
            log.error("缓存分片上传信息失败: uploadId={}", uploadId, e);
            throw new RuntimeException("缓存分片上传信息失败", e);
        }
    }

    @Override
    public MultipartUploadInitResp getMultipartUpload(String uploadId) {
        String key = MultipartUploadConstants.MULTIPART_UPLOAD_PREFIX + uploadId;
        try {
            Object value = RedisUtils.get(key);
            if (value != null) {
                return JSONUtil.toBean(value.toString(), MultipartUploadInitResp.class);
            }
            return null;
        } catch (Exception e) {
            log.error("获取分片上传信息失败: uploadId={}", uploadId, e);
            return null;
        }
    }

    @Override
    public void deleteMultipartUpload(String uploadId) {
        try {
            String key = MultipartUploadConstants.MULTIPART_UPLOAD_PREFIX + uploadId;
            String metadataKey = MultipartUploadConstants.MULTIPART_METADATA_PREFIX + uploadId;
            String expireKey = MultipartUploadConstants.MULTIPART_EXPIRE_PREFIX + uploadId;
            // Fetch the MD5 first so its mapping can be removed before the upload info goes away.
            // Fix: guard against a missing/expired record — the original dereferenced a null initResp.
            MultipartUploadInitResp initResp = getMultipartUpload(uploadId);
            if (initResp != null) {
                String fileMd5 = initResp.getFileMd5();
                if (StrUtil.isNotBlank(fileMd5)) {
                    deleteMd5Mapping(fileMd5);
                }
            }
            // Delete all upload-related records.
            RedisUtils.delete(key);
            RedisUtils.delete(metadataKey);
            RedisUtils.delete(expireKey);
            log.debug("删除分片上传信息: uploadId={}", uploadId);
        } catch (Exception e) {
            log.error("删除分片上传信息失败: uploadId={}", uploadId, e);
        }
    }

    @Override
    public void deleteMultipartUploadAll(String uploadId) {
        // deleteMultipartUpload also removes the MD5 mapping (see above).
        this.deleteMultipartUpload(uploadId);
        this.deleteFileParts(uploadId);
    }

    @Override
    public void setFilePart(String uploadId, FilePartInfo partInfo) {
        String key = MultipartUploadConstants.MULTIPART_PARTS_PREFIX + uploadId;
        String partKey = partInfo.getPartNumber().toString();
        try {
            RedisUtils.hSet(key, partKey, JSONUtil.toJsonStr(partInfo));
            RedisUtils.expire(key, Duration.ofHours(MultipartUploadConstants.DEFAULT_EXPIRE_HOURS));
            log.debug("缓存分片信息: uploadId={}, partNumber={}", uploadId, partKey);
        } catch (Exception e) {
            log.error("缓存分片信息失败: uploadId={}, partNumber={}", uploadId, partKey, e);
            throw new RuntimeException("缓存分片信息失败", e);
        }
    }

    @Override
    public List<FilePartInfo> getFileParts(String uploadId) {
        String key = MultipartUploadConstants.MULTIPART_PARTS_PREFIX + uploadId;
        try {
            Map<String, Object> entries = RedisUtils.hGetAll(key);
            if (entries.isEmpty()) {
                return new ArrayList<>();
            }
            // Deserialize and return the parts ordered by part number.
            return entries.values()
                .stream()
                .map(value -> JSONUtil.toBean(value.toString(), FilePartInfo.class))
                .sorted(Comparator.comparing(FilePartInfo::getPartNumber))
                .collect(Collectors.toList());
        } catch (Exception e) {
            log.error("获取分片列表失败: uploadId={}", uploadId, e);
            return new ArrayList<>();
        }
    }

    @Override
    public void deleteFileParts(String uploadId) {
        String key = MultipartUploadConstants.MULTIPART_PARTS_PREFIX + uploadId;
        try {
            RedisUtils.delete(key);
            log.debug("删除所有分片信息: uploadId={}", uploadId);
        } catch (Exception e) {
            log.error("删除所有分片信息失败: uploadId={}", uploadId, e);
        }
    }

    @Override
    public boolean existsFilePart(String uploadId, int partNumber) {
        String key = MultipartUploadConstants.MULTIPART_PARTS_PREFIX + uploadId;
        String partKey = String.valueOf(partNumber);
        return RedisUtils.hExists(key, partKey);
    }

    @Override
    public void cleanupExpiredData() {
        try {
            // Scan all expiration markers.
            Collection<String> keys = RedisUtils.keys(MultipartUploadConstants.MULTIPART_EXPIRE_PREFIX + "*");
            if (keys.isEmpty()) {
                return;
            }
            LocalDateTime now = LocalDateTime.now();
            List<String> expiredUploadIds = new ArrayList<>();
            for (String key : keys) {
                String uploadId = key.substring(MultipartUploadConstants.MULTIPART_EXPIRE_PREFIX.length());
                Object value = RedisUtils.get(key);
                if (value != null) {
                    try {
                        LocalDateTime expireTime = LocalDateTime.parse(value
                            .toString(), DateTimeFormatter.ISO_LOCAL_DATE_TIME);
                        if (now.isAfter(expireTime)) {
                            expiredUploadIds.add(uploadId);
                        }
                    } catch (Exception e) {
                        // Unparseable timestamps are treated as expired so bad records do not linger.
                        log.warn("解析过期时间失败: uploadId={}, value={}", uploadId, value);
                        expiredUploadIds.add(uploadId);
                    }
                }
            }
            // Purge every expired upload.
            for (String uploadId : expiredUploadIds) {
                deleteMultipartUpload(uploadId);
                deleteFileParts(uploadId);
                log.info("清理过期数据: uploadId={}", uploadId);
            }
            log.info("清理过期数据完成: count={}", expiredUploadIds.size());
        } catch (Exception e) {
            log.error("清理过期数据失败", e);
        }
    }
}

View File

@@ -0,0 +1,61 @@
/*
 * Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package top.continew.admin.system.factory;

import jakarta.annotation.PreDestroy;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.S3Configuration;
import software.amazon.awssdk.utils.SdkAutoCloseable;

import top.continew.admin.system.model.entity.StorageDO;

import java.net.URI;
import java.util.concurrent.ConcurrentHashMap;

/**
 * S3 client factory.
 * <p>Maintains a dynamic pool of synchronous {@link S3Client} instances so that
 * multiple endpoint / accessKey combinations can be served concurrently.</p>
 * <p>(The original javadoc said "异步"/async; the clients built here are synchronous.)</p>
 */
@Slf4j
@Component
public class S3ClientFactory {

    /** Client pool keyed by "endpoint|accessKey". */
    private final ConcurrentHashMap<String, S3Client> clientCache = new ConcurrentHashMap<>();

    /**
     * Returns a cached client for the given storage config, creating one on first use.
     *
     * @param storage storage configuration (endpoint, accessKey, secretKey)
     * @return a pooled {@link S3Client}
     */
    public S3Client getClient(StorageDO storage) {
        // NOTE(review): the key ignores secretKey — rotating the secret for the same
        // endpoint+accessKey keeps returning a client built with the old credentials.
        String key = storage.getEndpoint() + "|" + storage.getAccessKey();
        return clientCache.computeIfAbsent(key, k -> {
            StaticCredentialsProvider auth = StaticCredentialsProvider.create(AwsBasicCredentials.create(storage
                .getAccessKey(), storage.getSecretKey()));
            return S3Client.builder()
                .credentialsProvider(auth)
                .endpointOverride(URI.create(storage.getEndpoint()))
                // Region is required by the SDK even for S3-compatible endpoints.
                .region(Region.US_EAST_1)
                // Chunked encoding is disabled for broader S3-compatible vendor support.
                .serviceConfiguration(S3Configuration.builder().chunkedEncodingEnabled(false).build())
                .build();
        });
    }

    /**
     * Closes every pooled client on shutdown. A failure to close one client must
     * not prevent the remaining clients from being closed.
     */
    @PreDestroy
    public void closeAll() {
        clientCache.values().forEach(client -> {
            try {
                client.close();
            } catch (Exception e) {
                log.warn("关闭S3客户端失败: {}", e.getMessage());
            }
        });
        clientCache.clear();
    }
}

View File

@@ -0,0 +1,57 @@
/*
 * Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package top.continew.admin.system.factory;

import cn.hutool.core.util.StrUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import top.continew.admin.system.enums.StorageTypeEnum;
import top.continew.admin.system.handler.StorageHandler;
import top.continew.starter.core.exception.BaseException;

import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Storage handler factory.
 * <p>Dispatches {@link StorageHandler} implementations by storage type.</p>
 *
 * @author KAI
 * @since 2025/07/24 13:35
 */
@Component
public class StorageHandlerFactory {

    /** Handlers indexed by storage type; populated once at construction. */
    private final Map<StorageTypeEnum, StorageHandler> handlerMap = new ConcurrentHashMap<>();

    /**
     * Registers every discovered {@link StorageHandler} by its declared type.
     *
     * @param handlers all handler beans found in the Spring context
     */
    @Autowired
    public StorageHandlerFactory(List<StorageHandler> handlers) {
        for (StorageHandler handler : handlers) {
            handlerMap.put(handler.getType(), handler);
        }
    }

    /**
     * Returns the storage handler registered for the given type.
     *
     * @param type storage type
     * @return the matching {@link StorageHandler}
     * @throws BaseException if no handler is registered for the type
     */
    public StorageHandler createHandler(StorageTypeEnum type) {
        return Optional.ofNullable(handlerMap.get(type))
            .orElseThrow(() -> new BaseException(StrUtil.format("不存在此类型存储处理器:{}", type)));
    }
}

View File

@@ -0,0 +1,57 @@
/*
 * Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
 * <p>
 * Licensed under the GNU LESSER GENERAL PUBLIC LICENSE 3.0;
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.gnu.org/licenses/lgpl.html
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package top.continew.admin.system.handler;

import org.springframework.web.multipart.MultipartFile;
import top.continew.admin.system.enums.StorageTypeEnum;
import top.continew.admin.system.model.entity.StorageDO;
import top.continew.admin.system.model.req.MultipartUploadInitReq;
import top.continew.admin.system.model.resp.file.MultipartUploadInitResp;
import top.continew.admin.system.model.resp.file.MultipartUploadResp;

import java.util.List;

/**
 * Storage type handler.
 * <p>
 * Focused on file operations only; contains no business logic.
 *
 * @author KAI
 * @since 2025/7/30 17:15
 */
public interface StorageHandler {

    /**
     * Initializes a multipart upload for the given storage.
     *
     * @param storageDO storage entity
     * @param req       upload initialization request
     * @return {@link MultipartUploadInitResp} initialization result (uploadId, paths, part size, ...)
     */
    MultipartUploadInitResp initMultipartUpload(StorageDO storageDO, MultipartUploadInitReq req);

    /**
     * Uploads a single part.
     *
     * @param storageDO  storage entity
     * @param path       target file path
     * @param uploadId   upload ID (the original javadoc mislabeled this as "file name")
     * @param partNumber 1-based part number
     * @param file       part content
     * @return {@link MultipartUploadResp} result for this part
     */
    MultipartUploadResp uploadPart(StorageDO storageDO, String path, String uploadId, Integer partNumber, MultipartFile file);

    /**
     * Merges all uploaded parts into the final object.
     *
     * @param storageDO  storage entity
     * @param parts      parts reported as uploaded
     * @param path       target file path
     * @param uploadId   upload ID
     * @param needVerify whether to verify the local part list against the storage backend
     */
    void completeMultipartUpload(StorageDO storageDO, List<MultipartUploadResp> parts, String path, String uploadId, boolean needVerify);

    /**
     * Cleans up uploaded parts (e.g. after a cancel).
     *
     * @param storageDO storage entity
     * @param uploadId  upload ID
     */
    void cleanPart(StorageDO storageDO, String uploadId);

    /**
     * Returns the storage type this handler serves.
     *
     * @return storage type
     */
    StorageTypeEnum getType();
}

View File

@@ -0,0 +1,236 @@
/*
 * Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package top.continew.admin.system.handler.impl;

import cn.hutool.core.io.FileUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.crypto.digest.DigestUtil;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import org.springframework.web.multipart.MultipartFile;
import top.continew.admin.system.constant.MultipartUploadConstants;
import top.continew.admin.system.enums.StorageTypeEnum;
import top.continew.admin.system.handler.StorageHandler;
import top.continew.admin.system.model.entity.StorageDO;
import top.continew.admin.system.model.req.MultipartUploadInitReq;
import top.continew.admin.system.model.resp.file.MultipartUploadInitResp;
import top.continew.admin.system.model.resp.file.MultipartUploadResp;
import top.continew.admin.system.service.FileService;
import top.continew.starter.core.exception.BaseException;

import java.io.File;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.Comparator;
import java.util.List;
import java.util.UUID;

/**
 * Local storage handler.
 * <p>Implements multipart upload, merge and cancel against the local file system.
 * Parts are staged under {bucket}/temp/{uploadId} and concatenated on completion.</p>
 *
 * @author KAI
 * @since 2025/7/30 22:58
 */
@Slf4j
@Component
@RequiredArgsConstructor
public class LocalStorageHandler implements StorageHandler {

    private final FileService fileService;

    /**
     * Initializes a multipart upload: creates the staging directory and the
     * parent directory of the final file, then returns the upload descriptor.
     *
     * @param storageDO storage entity (bucketName is the local storage root path)
     * @param req       upload initialization request
     * @return initialization result
     */
    @Override
    public MultipartUploadInitResp initMultipartUpload(StorageDO storageDO, MultipartUploadInitReq req) {
        String uploadId = UUID.randomUUID().toString();
        String bucket = storageDO.getBucketName(); // for local storage the bucket is the storage root path
        String parentPath = req.getParentPath();
        String fileName = req.getFileName();
        // Fix: blankToDefault RETURNS the defaulted value; the original discarded the
        // result, producing paths like "null/<fileName>" when parentPath was blank.
        parentPath = StrUtil.blankToDefault(parentPath, StrUtil.SLASH);
        String relativePath = StrUtil.endWith(parentPath, StrUtil.SLASH) ? parentPath + fileName : parentPath + StrUtil.SLASH + fileName;
        try {
            // Create the temporary directory that will hold the uploaded parts.
            String tempDirPath = buildTempDirPath(bucket, uploadId);
            FileUtil.mkdir(tempDirPath);
            fileService.createParentDir(parentPath, storageDO);
            // Build the response descriptor.
            MultipartUploadInitResp result = new MultipartUploadInitResp();
            result.setBucket(bucket);
            result.setFileId(UUID.randomUUID().toString());
            result.setUploadId(uploadId);
            result.setPlatform(storageDO.getCode());
            result.setFileName(fileName);
            result.setFileMd5(req.getFileMd5());
            result.setFileSize(req.getFileSize());
            result.setExtension(FileUtil.extName(fileName));
            result.setContentType(req.getContentType());
            result.setPath(relativePath);
            result.setParentPath(parentPath);
            result.setPartSize(MultipartUploadConstants.MULTIPART_UPLOAD_PART_SIZE);
            log.info("本地存储初始化分片上传成功: uploadId={}, path={}", uploadId, parentPath);
            return result;
        } catch (Exception e) {
            log.error("本地存储初始化分片上传失败: {}", e.getMessage(), e);
            throw new BaseException("本地存储初始化分片上传失败: " + e.getMessage(), e);
        }
    }

    /**
     * Saves one part to the staging directory and returns its MD5 as the ETag.
     * Failures are reported in the response rather than thrown.
     *
     * @param storageDO  storage entity
     * @param path       target file path (unused for local staging)
     * @param uploadId   upload ID
     * @param partNumber 1-based part number
     * @param file       part content
     * @return per-part result
     */
    @Override
    public MultipartUploadResp uploadPart(StorageDO storageDO, String path, String uploadId, Integer partNumber, MultipartFile file) {
        try {
            long size = file.getSize();
            String bucket = storageDO.getBucketName();
            // Resolve (and if needed create) the staging directory.
            String tempDirPath = buildTempDirPath(bucket, uploadId);
            File tempDir = new File(tempDirPath);
            if (!tempDir.exists()) {
                FileUtil.mkdir(tempDirPath);
            }
            // Persist the part as part_<n>.
            // NOTE(review): MultipartFile.transferTo may resolve a RELATIVE destination
            // against the servlet temp dir — confirm bucket is always an absolute path.
            String partFilePath = tempDirPath + File.separator + String.format("part_%s", partNumber);
            File partFile = new File(partFilePath);
            file.transferTo(partFile);
            // ETag is the MD5 of the stored part.
            String etag = DigestUtil.md5Hex(partFile);
            MultipartUploadResp result = new MultipartUploadResp();
            result.setPartNumber(partNumber);
            result.setPartETag(etag);
            result.setPartSize(size);
            result.setSuccess(true);
            log.info("本地存储分片上传成功: uploadId={}, partNumber={}, etag={}", uploadId, partNumber, etag);
            return result;
        } catch (Exception e) {
            log.error("本地存储分片上传失败: uploadId={}, partNumber={}, error={}", uploadId, partNumber, e.getMessage(), e);
            MultipartUploadResp result = new MultipartUploadResp();
            result.setPartNumber(partNumber);
            result.setSuccess(false);
            result.setErrorMessage(e.getMessage());
            return result;
        }
    }

    /**
     * Concatenates all successful parts, in part-number order, into the target file
     * and removes the staging directory.
     *
     * @param storageDO  storage entity
     * @param parts      parts reported as uploaded
     * @param path       target file path relative to the bucket root
     * @param uploadId   upload ID
     * @param needVerify ignored for local storage (no remote state to verify against)
     */
    @Override
    public void completeMultipartUpload(StorageDO storageDO, List<MultipartUploadResp> parts, String path, String uploadId, boolean needVerify) {
        String bucket = storageDO.getBucketName(); // for local storage the bucket is the storage root path
        String tempDirPath = buildTempDirPath(bucket, uploadId);
        try {
            // Local storage needs no verification; the supplied part list is trusted.
            Path targetPath = Paths.get(bucket, path);
            Files.createDirectories(targetPath.getParent());
            // Merge the parts into the target file.
            try (OutputStream out = Files
                .newOutputStream(targetPath, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
                // Only successful parts, sorted by part number.
                List<MultipartUploadResp> sortedParts = parts.stream()
                    .filter(MultipartUploadResp::isSuccess)
                    .sorted(Comparator.comparingInt(MultipartUploadResp::getPartNumber))
                    .toList();
                // Append each part in order.
                for (MultipartUploadResp part : sortedParts) {
                    Path partPath = Paths.get(tempDirPath, String.format("part_%s", part.getPartNumber()));
                    if (!Files.exists(partPath)) {
                        throw new BaseException("分片文件不存在: partNumber=" + part.getPartNumber());
                    }
                    Files.copy(partPath, out);
                }
            }
            // Remove the staging directory.
            cleanupTempFiles(tempDirPath);
            log.info("本地存储分片合并成功: uploadId={}, targetPath={}", uploadId, targetPath);
        } catch (Exception e) {
            log.error("本地存储分片合并失败: uploadId={}, path={}, error={}", uploadId, path, e.getMessage(), e);
            throw new BaseException("完成分片上传失败: " + e.getMessage(), e);
        }
    }

    /**
     * Removes the staging directory for a cancelled upload.
     *
     * @param storageDO storage entity
     * @param uploadId  upload ID
     */
    @Override
    public void cleanPart(StorageDO storageDO, String uploadId) {
        try {
            String bucket = storageDO.getBucketName();
            String tempDirPath = buildTempDirPath(bucket, uploadId);
            cleanupTempFiles(tempDirPath);
            log.info("本地存储分片清理成功: uploadId={}", uploadId);
        } catch (Exception e) {
            log.error("本地存储分片清理失败: uploadId={}, error={}", uploadId, e.getMessage(), e);
            throw new BaseException("本地存储分片清理失败: " + e.getMessage(), e);
        }
    }

    @Override
    public StorageTypeEnum getType() {
        return StorageTypeEnum.LOCAL;
    }

    /**
     * Builds the staging directory path: {bucket}/temp/{uploadId}.
     *
     * @param bucket   local storage root path
     * @param uploadId upload ID
     * @return staging directory path
     */
    private String buildTempDirPath(String bucket, String uploadId) {
        return StrUtil.appendIfMissing(bucket, File.separator) + MultipartUploadConstants.TEMP_DIR_NAME + File.separator + uploadId;
    }

    /**
     * Best-effort removal of the staging directory; failures are logged, not thrown.
     *
     * @param tempDirPath staging directory path
     */
    private void cleanupTempFiles(String tempDirPath) {
        try {
            FileUtil.del(tempDirPath);
        } catch (Exception e) {
            log.warn("清理临时文件失败: {}, {}", tempDirPath, e.getMessage());
        }
    }
}

View File

@@ -0,0 +1,298 @@
package top.continew.admin.system.handler.impl;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.util.StrUtil;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import org.springframework.web.multipart.MultipartFile;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.*;
import top.continew.admin.system.constant.MultipartUploadConstants;
import top.continew.admin.system.enums.StorageTypeEnum;
import top.continew.admin.system.factory.S3ClientFactory;
import top.continew.admin.system.handler.StorageHandler;
import top.continew.admin.system.model.entity.StorageDO;
import top.continew.admin.system.model.req.MultipartUploadInitReq;
import top.continew.admin.system.model.resp.file.MultipartUploadInitResp;
import top.continew.admin.system.model.resp.file.MultipartUploadResp;
import top.continew.admin.system.service.FileService;
import top.continew.starter.core.exception.BaseException;
import java.util.*;
import java.util.stream.Collectors;
/**
* S3存储处理器
* <p>使用AWS SDK 2.x版本API。实现分片上传、合并、取消等操作。</p>
*
* @author KAI
* @since 2025/07/30 20:10
*/
@Component
@Slf4j
@RequiredArgsConstructor
public class S3StorageHandler implements StorageHandler {
private final S3ClientFactory s3ClientFactory;
private final FileService fileService;
/**
 * Initializes an S3 multipart upload and returns the upload descriptor.
 *
 * @param storageDO storage entity (bucket, credentials, endpoint)
 * @param req       upload initialization request
 * @return initialization result containing the S3-issued uploadId
 */
@Override
public MultipartUploadInitResp initMultipartUpload(StorageDO storageDO, MultipartUploadInitReq req) {
    String bucket = storageDO.getBucketName();
    String parentPath = req.getParentPath();
    String fileName = req.getFileName();
    String contentType = req.getContentType();
    // Fix: blankToDefault RETURNS the defaulted value; the original discarded the
    // result, producing keys like "null/<fileName>" when parentPath was blank.
    parentPath = StrUtil.blankToDefault(parentPath, StrUtil.SLASH);
    String relativePath = StrUtil.endWith(parentPath, StrUtil.SLASH) ? parentPath + fileName : parentPath + StrUtil.SLASH + fileName;
    fileService.createParentDir(parentPath, storageDO);
    try {
        // Build the CreateMultipartUpload request.
        CreateMultipartUploadRequest.Builder requestBuilder = CreateMultipartUploadRequest.builder()
            .bucket(bucket)
            .key(buildS3Key(relativePath))
            .contentType(contentType);
        // Metadata intentionally omitted: non-ASCII metadata values break S3 signature validation.
        // if (metaData != null && !metaData.isEmpty()) {
        //     requestBuilder.metadata(metaData);
        // }
        S3Client s3Client = s3ClientFactory.getClient(storageDO);
        log.info("S3初始化分片上传: bucket={}, key={}, contentType={}", bucket, buildS3Key(relativePath), contentType);
        // Ask S3 for an uploadId.
        CreateMultipartUploadResponse response = s3Client.createMultipartUpload(requestBuilder.build());
        String uploadId = response.uploadId();
        // Build the response descriptor.
        MultipartUploadInitResp result = new MultipartUploadInitResp();
        result.setBucket(bucket);
        result.setFileId(UUID.randomUUID().toString());
        result.setUploadId(uploadId);
        result.setPlatform(storageDO.getCode());
        result.setFileName(fileName);
        result.setFileMd5(req.getFileMd5());
        result.setFileSize(req.getFileSize());
        result.setExtension(FileUtil.extName(fileName));
        result.setContentType(req.getContentType());
        result.setPath(relativePath);
        result.setParentPath(parentPath);
        result.setPartSize(MultipartUploadConstants.MULTIPART_UPLOAD_PART_SIZE);
        log.info("S3初始化分片上传成功: uploadId={}, path={}", uploadId, relativePath);
        return result;
    } catch (Exception e) {
        throw new BaseException("S3初始化分片上传失败: " + e.getMessage(), e);
    }
}
/**
 * Uploads a single part to S3 and reports the resulting ETag.
 * Failures are captured in the response rather than thrown.
 *
 * @param storageDO  storage entity
 * @param path       target object path
 * @param uploadId   S3 upload ID
 * @param partNumber 1-based part number
 * @param file       part content
 * @return per-part result (success flag, ETag or error message)
 */
@Override
public MultipartUploadResp uploadPart(StorageDO storageDO, String path, String uploadId, Integer partNumber, MultipartFile file) {
    try {
        S3Client client = s3ClientFactory.getClient(storageDO);
        String bucketName = storageDO.getBucketName();
        // Buffer the part in memory (may need optimization for very large parts).
        byte[] partBytes = file.getBytes();
        // Build and execute the UploadPart request.
        UploadPartRequest uploadRequest = UploadPartRequest.builder()
            .bucket(bucketName)
            .key(buildS3Key(path))
            .uploadId(uploadId)
            .partNumber(partNumber)
            .contentLength((long) partBytes.length)
            .build();
        UploadPartResponse uploadResponse = client.uploadPart(uploadRequest, RequestBody.fromBytes(partBytes));
        // Report success with the ETag S3 assigned to this part.
        MultipartUploadResp resp = new MultipartUploadResp();
        resp.setPartNumber(partNumber);
        resp.setPartETag(uploadResponse.eTag());
        resp.setSuccess(true);
        log.info("S3上传分片成功: partNumber={} for key={} with uploadId={}", partNumber, path, uploadId);
        log.info("上传分片ETag: {}", uploadResponse.eTag());
        return resp;
    } catch (Exception e) {
        // Report failure; the caller decides whether to retry this part.
        MultipartUploadResp resp = new MultipartUploadResp();
        resp.setPartNumber(partNumber);
        resp.setSuccess(false);
        resp.setErrorMessage(e.getMessage());
        log.error("S3上传分片失败: partNumber={} for key={} with uploadId={} errorMessage={}", partNumber, path, uploadId, e.getMessage());
        return resp;
    }
}
@Override
public void completeMultipartUpload(StorageDO storageDO, List<MultipartUploadResp> parts, String path, String uploadId, boolean needVerify) {
    if (path == null) {
        throw new BaseException("无效的uploadId: " + uploadId);
    }
    String bucketName = storageDO.getBucketName();
    S3Client client = s3ClientFactory.getClient(storageDO);
    // Optionally reconcile locally recorded parts with what S3 actually holds
    if (needVerify) {
        validateParts(parts, listParts(bucketName, path, uploadId, client));
    }
    // S3 requires the completed parts sorted by part number
    List<CompletedPart> completedParts = parts.stream()
        .filter(MultipartUploadResp::isSuccess)
        .map(p -> CompletedPart.builder().partNumber(p.getPartNumber()).eTag(p.getPartETag()).build())
        .sorted(Comparator.comparingInt(CompletedPart::partNumber))
        .collect(Collectors.toList());
    String key = buildS3Key(path);
    CompleteMultipartUploadRequest completeRequest = CompleteMultipartUploadRequest.builder()
        .bucket(bucketName)
        .key(key)
        .uploadId(uploadId)
        .multipartUpload(CompletedMultipartUpload.builder().parts(completedParts).build())
        .build();
    // Ask S3 to assemble the object from the uploaded parts
    client.completeMultipartUpload(completeRequest);
    log.info("S3完成分片上传: key={}, uploadId={}, parts={}", key, uploadId, completedParts.size());
}
@Override
public void cleanPart(StorageDO storageDO, String uploadId) {
    try {
        String bucketName = storageDO.getBucketName();
        S3Client client = s3ClientFactory.getClient(storageDO);
        // Enumerate every in-progress multipart upload in the bucket
        ListMultipartUploadsResponse uploads = client
            .listMultipartUploads(ListMultipartUploadsRequest.builder().bucket(bucketName).build());
        // Find the upload task matching the given uploadId, if any
        MultipartUpload match = uploads.uploads().stream()
            .filter(u -> u.uploadId().equals(uploadId))
            .findFirst()
            .orElse(null);
        if (match == null) {
            log.warn("S3未找到对应的分片上传任务: uploadId={}", uploadId);
            return;
        }
        // Abort it, releasing the storage held by its uploaded parts
        client.abortMultipartUpload(AbortMultipartUploadRequest.builder()
            .bucket(bucketName)
            .key(match.key())
            .uploadId(uploadId)
            .build());
        log.info("S3清理分片上传成功: bucket={}, key={}, uploadId={}", bucketName, match.key(), uploadId);
    } catch (Exception e) {
        log.error("S3清理分片上传失败: uploadId={}, error={}", uploadId, e.getMessage(), e);
        throw new BaseException("S3清理分片上传失败: " + e.getMessage(), e);
    }
}
/**
 * Storage type handled by this handler: S3-compatible object storage.
 */
@Override
public StorageTypeEnum getType() {
    return StorageTypeEnum.OSS;
}
/**
 * List the parts already uploaded for a multipart upload session.
 *
 * @param bucket   bucket name
 * @param path     raw object path (normalized via buildS3Key)
 * @param uploadId multipart upload id
 * @param s3Client client bound to the storage config
 * @return one response entry per uploaded part, each flagged successful
 */
public List<MultipartUploadResp> listParts(String bucket, String path, String uploadId, S3Client s3Client) {
    try {
        ListPartsRequest listRequest = ListPartsRequest.builder()
            .bucket(bucket)
            .key(buildS3Key(path))
            .uploadId(uploadId)
            .build();
        ListPartsResponse listResponse = s3Client.listParts(listRequest);
        // Convert each S3 part into the internal response model
        List<MultipartUploadResp> results = new ArrayList<>();
        for (var part : listResponse.parts()) {
            MultipartUploadResp item = new MultipartUploadResp();
            item.setPartNumber(part.partNumber());
            item.setPartETag(part.eTag());
            item.setSuccess(true);
            results.add(item);
        }
        return results;
    } catch (Exception e) {
        throw new BaseException("S3列出分片失败: " + e.getMessage(), e);
    }
}
/**
 * Verify that locally recorded parts match the parts S3 reports.
 *
 * @param recordParts parts recorded locally during upload
 * @param s3Parts     parts reported by S3 (ListParts)
 */
private void validateParts(List<MultipartUploadResp> recordParts, List<MultipartUploadResp> s3Parts) {
    Map<Integer, String> expected = recordParts.stream()
        .collect(Collectors.toMap(MultipartUploadResp::getPartNumber, MultipartUploadResp::getPartETag));
    Map<Integer, String> actual = s3Parts.stream()
        .collect(Collectors.toMap(MultipartUploadResp::getPartNumber, MultipartUploadResp::getPartETag));
    // Part counts must match exactly
    if (expected.size() != actual.size()) {
        throw new BaseException(String.format("分片数量不一致: 本地记录=%d, S3=%d", expected.size(), actual.size()));
    }
    // Compare every recorded part against S3's view
    List<Integer> missingParts = new ArrayList<>();
    List<Integer> mismatchParts = new ArrayList<>();
    expected.forEach((partNumber, recordETag) -> {
        String s3ETag = actual.get(partNumber);
        if (s3ETag == null) {
            missingParts.add(partNumber);
        } else if (!recordETag.equals(s3ETag)) {
            mismatchParts.add(partNumber);
        }
    });
    if (!missingParts.isEmpty()) {
        throw new BaseException("S3缺失分片: " + missingParts);
    }
    if (!mismatchParts.isEmpty()) {
        throw new BaseException("分片ETag不匹配: " + mismatchParts);
    }
}
/**
 * Normalize an S3 object key: strip leading slashes and collapse runs of slashes.
 *
 * @param rawKey full raw path, e.g. "/folder//子目录//文件名.png"
 * @return normalized key, e.g. "folder/子目录/文件名.png"
 */
public static String buildS3Key(String rawKey) {
    if (rawKey == null || rawKey.isEmpty()) {
        throw new IllegalArgumentException("key 不能为空");
    }
    // Collapse consecutive slashes first, then drop the single remaining leading slash
    String key = rawKey.replaceAll("/+", "/");
    return key.startsWith("/") ? key.substring(1) : key;
}
}

View File

@@ -0,0 +1,81 @@
/*
* Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package top.continew.admin.system.model.req;
import io.swagger.v3.oas.annotations.media.Schema;
import jakarta.validation.constraints.NotBlank;
import jakarta.validation.constraints.NotNull;
import jakarta.validation.constraints.Min;
import lombok.Data;
import java.io.Serial;
import java.io.Serializable;
import java.util.Map;
/**
 * Multipart upload initialization request parameters.
 *
 * @author KAI
 * @since 2025/7/30 16:38
 */
@Data
@Schema(description = "分片初始化请求参数")
public class MultipartUploadInitReq implements Serializable {
@Serial
private static final long serialVersionUID = 1L;
/**
 * File name (original name, including extension)
 */
@Schema(description = "文件名", example = "example.zip")
@NotBlank(message = "文件名不能为空")
private String fileName;
/**
 * File size in bytes
 */
@Schema(description = "文件大小", example = "1048576")
@NotNull(message = "文件大小不能为空")
@Min(value = 1, message = "文件大小必须大于0")
private Long fileSize;
/**
 * File MD5 digest (used to find and resume an existing upload session)
 */
@Schema(description = "文件MD5值", example = "5d41402abc4b2a76b9719d911017c592")
@NotBlank(message = "文件MD5值不能为空")
private String fileMd5;
/**
 * File MIME type
 */
@Schema(description = "文件MIME类型", example = "application/zip")
private String contentType;
/**
 * Parent storage path
 */
@Schema(description = "存储父路径", example = "/upload/files/")
private String parentPath;
/**
 * File metadata. NOTE(review): non-ASCII metadata values may break S3 signature
 * validation — the S3 handler currently does not forward metadata for this reason.
 */
@Schema(description = "文件元信息")
private Map<String,String> metaData;
}

View File

@@ -0,0 +1,55 @@
/*
* Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package top.continew.admin.system.model.req;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;
/**
 * Multipart upload (single part) request parameters.
 *
 * @author KAI
 * @since 2025/7/30 16:40
 */
@Data
@Schema(description = "分片上传请求参数")
public class MultipartUploadReq implements java.io.Serializable {

    // Serializable + explicit serialVersionUID for consistency with the other
    // request/response DTOs in this feature (e.g. MultipartUploadInitReq).
    @java.io.Serial
    private static final long serialVersionUID = 1L;

    /**
     * Upload ID identifying the multipart session (returned by the init call)
     */
    @Schema(description = "上传ID")
    private String uploadId;

    /**
     * Part number (1-based)
     */
    @Schema(description = "分片序号")
    private Integer partNumber;

    /**
     * Part ETag returned by the storage backend
     */
    @Schema(description = "分片ETag")
    private String eTag;

    /**
     * Storage code selecting the target storage platform
     */
    @Schema(description = "存储编码")
    private String storageCode;
}

View File

@@ -0,0 +1,94 @@
/*
* Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package top.continew.admin.system.model.resp.file;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;
import java.io.Serializable;
import java.time.LocalDateTime;
/**
 * Uploaded-part metadata recorded for one part of a multipart upload.
 *
 * @author echo
 * @since 2.14.0
 */
@Data
@Schema(description = "文件分片信息")
public class FilePartInfo implements Serializable {

    // Explicit serialVersionUID: the class implements Serializable, and without one
    // any field change silently breaks deserialization of cached part records.
    private static final long serialVersionUID = 1L;

    /**
     * File ID
     */
    @Schema(description = "文件ID")
    private String fileId;

    /**
     * Part number (1-based)
     */
    @Schema(description = "分片编号从1开始")
    private Integer partNumber;

    /**
     * Part size in bytes
     */
    @Schema(description = "分片大小")
    private Long partSize;

    /**
     * Part MD5 digest
     */
    @Schema(description = "分片MD5")
    private String partMd5;

    /**
     * Part ETag returned by S3
     */
    @Schema(description = "分片ETag")
    private String partETag;

    /**
     * Upload ID of the multipart session
     */
    @Schema(description = "上传ID")
    private String uploadId;

    /**
     * Upload timestamp
     */
    @Schema(description = "上传时间")
    private LocalDateTime uploadTime;

    /**
     * Status: UPLOADING, SUCCESS, FAILED
     */
    @Schema(description = "状态")
    private String status;

    /**
     * Bucket name
     */
    @Schema(description = "存储桶")
    private String bucket;

    /**
     * File path
     */
    @Schema(description = "文件路径")
    private String path;
}

View File

@@ -0,0 +1,120 @@
/*
* Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package top.continew.admin.system.model.resp.file;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;
import java.io.Serializable;
import java.util.Set;
/**
 * Multipart upload initialization result.
 *
 * @author echo
 * @since 2.14.0
 */
@Data
@Schema(description = "分片初始化响应参数")
public class MultipartUploadInitResp implements Serializable {

    // Explicit serialVersionUID: the class implements Serializable, and without one
    // any field change silently breaks deserialization of cached session records.
    private static final long serialVersionUID = 1L;

    /**
     * File ID
     */
    @Schema(description = "文件ID")
    private String fileId;

    /**
     * Upload ID returned by S3
     */
    @Schema(description = "上传ID")
    private String uploadId;

    /**
     * Bucket name
     */
    @Schema(description = "存储桶")
    private String bucket;

    /**
     * Storage platform code
     */
    @Schema(description = "存储平台")
    private String platform;

    /**
     * File name
     */
    @Schema(description = "文件名称")
    private String fileName;

    /**
     * File MD5 digest
     */
    @Schema(description = "文件MD5")
    private String fileMd5;

    /**
     * File size in bytes
     */
    @Schema(description = "文件大小")
    private long fileSize;

    /**
     * File extension
     */
    @Schema(description = "扩展名")
    private String extension;

    /**
     * Content (MIME) type
     */
    @Schema(description = "内容类型")
    private String contentType;

    /**
     * File type
     */
    @Schema(description = "文件类型")
    private String type;

    /**
     * Parent path of the file
     */
    @Schema(description = "文件父路径")
    private String parentPath;

    /**
     * File path
     */
    @Schema(description = "文件路径")
    private String path;

    /**
     * Part size in bytes
     */
    @Schema(description = "分片大小")
    private Long partSize;

    /**
     * Numbers of parts already uploaded (for resume)
     */
    @Schema(description = "已上传分片编号集合")
    private Set<Integer> uploadedPartNumbers;
}

View File

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2022-present Charles7c Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package top.continew.admin.system.model.resp.file;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;
import java.io.Serializable;
/**
 * Result of uploading a single part.
 *
 * @author echo
 * @since 2.14.0
 */
@Data
@Schema(description = "分片上传响应参数")
public class MultipartUploadResp implements Serializable {

    // Explicit serialVersionUID: the class implements Serializable, and without one
    // any field change silently breaks deserialization of cached part results.
    private static final long serialVersionUID = 1L;

    /**
     * Part number (1-based)
     */
    @Schema(description = "分片编号")
    private Integer partNumber;

    /**
     * Part ETag returned by the storage backend
     */
    @Schema(description = "分片ETag")
    private String partETag;

    /**
     * Part size in bytes
     */
    @Schema(description = "分片大小")
    private Long partSize;

    /**
     * Whether the part upload succeeded
     */
    @Schema(description = "是否成功")
    private boolean success;

    /**
     * Error message when the upload failed
     */
    @Schema(description = "错误信息")
    private String errorMessage;
}

View File

@@ -21,6 +21,7 @@ import org.dromara.x.file.storage.core.FileInfo;
import org.springframework.web.multipart.MultipartFile;
import top.continew.admin.common.base.service.BaseService;
import top.continew.admin.system.model.entity.FileDO;
import top.continew.admin.system.model.entity.StorageDO;
import top.continew.admin.system.model.query.FileQuery;
import top.continew.admin.system.model.req.FileReq;
import top.continew.admin.system.model.resp.file.FileResp;
@@ -148,6 +149,18 @@ public interface FileService extends BaseService<FileResp, FileResp, FileQuery,
*/
Long countByStorageIds(List<Long> storageIds);
/**
* 创建上级文件夹(支持多级)
*
* <p>
* user/avatar/ => userpath/user、avatarpath/user/avatar
* </p>
*
* @param parentPath 上级目录
* @param storage 存储配置
*/
void createParentDir(String parentPath, StorageDO storage);
/**
* 获取默认上级目录
*

View File

@@ -0,0 +1,24 @@
package top.continew.admin.system.service;
import org.springframework.web.multipart.MultipartFile;
import top.continew.admin.system.model.entity.FileDO;
import top.continew.admin.system.model.req.MultipartUploadInitReq;
import top.continew.admin.system.model.resp.file.MultipartUploadInitResp;
import top.continew.admin.system.model.resp.file.MultipartUploadResp;
/**
 * Multipart upload business interface.
 *
 * @author KAI
 * @since 2025/7/3 8:42
 */
public interface MultipartUploadService {

    /**
     * Initialize (or resume) a multipart upload session.
     *
     * @param multiPartUploadInitReq init parameters (name, size, MD5, content type, parent path, metadata)
     * @return session info including uploadId, part size and already-uploaded part numbers
     */
    MultipartUploadInitResp initMultipartUpload(MultipartUploadInitReq multiPartUploadInitReq);

    /**
     * Upload a single part.
     *
     * @param file       part content
     * @param uploadId   multipart session id
     * @param partNumber part number (1-based)
     * @param path       target file path
     * @return part upload result (part number, ETag, success flag)
     */
    MultipartUploadResp uploadPart(MultipartFile file, String uploadId, Integer partNumber, String path);

    /**
     * Complete the multipart upload: validate parts, merge them, and persist the file record.
     *
     * @param uploadId multipart session id
     * @return the saved file entity
     */
    FileDO completeMultipartUpload(String uploadId);

    /**
     * Cancel the multipart upload and clean up any uploaded parts.
     *
     * @param uploadId multipart session id
     */
    void cancelMultipartUpload(String uploadId);
}

View File

@@ -278,7 +278,8 @@ public class FileServiceImpl extends BaseServiceImpl<FileMapper, FileDO, FileRes
* @param parentPath 上级目录
* @param storage 存储配置
*/
private void createParentDir(String parentPath, StorageDO storage) {
@Override
public void createParentDir(String parentPath, StorageDO storage) {
if (StrUtil.isBlank(parentPath) || StringConstants.SLASH.equals(parentPath)) {
return;
}

View File

@@ -0,0 +1,185 @@
package top.continew.admin.system.service.impl;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.util.StrUtil;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;
import top.continew.admin.system.constant.MultipartUploadConstants;
import top.continew.admin.system.dao.MultipartUploadDao;
import top.continew.admin.system.enums.FileTypeEnum;
import top.continew.admin.system.factory.StorageHandlerFactory;
import top.continew.admin.system.handler.StorageHandler;
import top.continew.admin.system.handler.impl.LocalStorageHandler;
import top.continew.admin.system.model.entity.FileDO;
import top.continew.admin.system.model.entity.StorageDO;
import top.continew.admin.system.model.req.MultipartUploadInitReq;
import top.continew.admin.system.model.resp.file.FilePartInfo;
import top.continew.admin.system.model.resp.file.MultipartUploadInitResp;
import top.continew.admin.system.model.resp.file.MultipartUploadResp;
import top.continew.admin.system.service.FileService;
import top.continew.admin.system.service.MultipartUploadService;
import top.continew.admin.system.service.StorageService;
import top.continew.starter.core.exception.BaseException;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
/**
 * Multipart upload service implementation.
 *
 * <p>Coordinates the storage handler (local or S3) with the upload-session data
 * kept in {@link MultipartUploadDao}.</p>
 *
 * @author KAI
 * @since 2025/7/31 9:30
 */
@Service
@RequiredArgsConstructor
public class MultipartUploadServiceImpl implements MultipartUploadService {

    /**
     * Part status recorded after a successful part upload
     */
    private static final String PART_STATUS_SUCCESS = "SUCCESS";

    /**
     * Part status recorded after a failed part upload
     */
    private static final String PART_STATUS_FAILED = "FAILED";

    private final StorageService storageService;
    private final StorageHandlerFactory storageHandlerFactory;
    private final MultipartUploadDao multipartUploadDao;
    private final FileService fileService;

    @Override
    public MultipartUploadInitResp initMultipartUpload(MultipartUploadInitReq multiPartUploadInitReq) {
        // TODO: accept a storageCode to target a specific platform; currently always the default storage
        StorageDO storageDO = storageService.getByCode(null);
        // Resume support: look up an existing session by the file's MD5
        String uploadId = multipartUploadDao.getUploadIdByMd5(multiPartUploadInitReq.getFileMd5());
        if (StrUtil.isNotBlank(uploadId)) {
            MultipartUploadInitResp multipartUpload = multipartUploadDao.getMultipartUpload(uploadId);
            // Only reuse the session when the storage platform and part size are unchanged
            if (multipartUpload != null
                && multipartUpload.getPartSize().equals(MultipartUploadConstants.MULTIPART_UPLOAD_PART_SIZE)
                && multipartUpload.getPlatform().equals(storageDO.getCode())) {
                // Report which parts are already uploaded so the client can skip them
                List<FilePartInfo> fileParts = multipartUploadDao.getFileParts(uploadId);
                Set<Integer> partNumbers = fileParts.stream()
                    .map(FilePartInfo::getPartNumber)
                    .collect(Collectors.toSet());
                multipartUpload.setUploadedPartNumbers(partNumbers);
                return multipartUpload;
            }
            // TODO: decide whether stale parts should be cleaned up when platform or part size changed
        }
        StorageHandler storageHandler = storageHandlerFactory.createHandler(storageDO.getType());
        // File metadata supplied by the client
        Map<String, String> metaData = multiPartUploadInitReq.getMetaData();
        MultipartUploadInitResp multipartUploadInitResp = storageHandler.initMultipartUpload(storageDO, multiPartUploadInitReq);
        // Cache the session plus the MD5 -> uploadId mapping used for resume lookups
        multipartUploadDao.setMultipartUpload(multipartUploadInitResp.getUploadId(), multipartUploadInitResp, metaData);
        multipartUploadDao.setMd5Mapping(multiPartUploadInitReq.getFileMd5(), multipartUploadInitResp.getUploadId());
        return multipartUploadInitResp;
    }

    @Override
    public MultipartUploadResp uploadPart(MultipartFile file, String uploadId, Integer partNumber, String path) {
        StorageDO storageDO = storageService.getByCode(null);
        StorageHandler storageHandler = storageHandlerFactory.createHandler(storageDO.getType());
        MultipartUploadResp resp = storageHandler.uploadPart(storageDO, path, uploadId, partNumber, file);
        FilePartInfo partInfo = new FilePartInfo();
        partInfo.setUploadId(uploadId);
        partInfo.setBucket(storageDO.getBucketName());
        partInfo.setPath(path);
        partInfo.setPartNumber(partNumber);
        partInfo.setPartETag(resp.getPartETag());
        partInfo.setPartSize(resp.getPartSize());
        // Record the actual outcome. Previously the status was hard-coded to SUCCESS,
        // which let completeMultipartUpload treat failed parts as uploaded.
        partInfo.setStatus(resp.isSuccess() ? PART_STATUS_SUCCESS : PART_STATUS_FAILED);
        partInfo.setUploadTime(LocalDateTime.now());
        multipartUploadDao.setFilePart(uploadId, partInfo);
        return resp;
    }

    @Override
    public FileDO completeMultipartUpload(String uploadId) {
        StorageDO storageDO = storageService.getByCode(null);
        // Load every part recorded for this session
        List<FilePartInfo> recordedParts = multipartUploadDao.getFileParts(uploadId);
        MultipartUploadInitResp initResp = multipartUploadDao.getMultipartUpload(uploadId);
        // Convert recorded parts into the handler's response model
        List<MultipartUploadResp> parts = recordedParts.stream().map(partInfo -> {
            MultipartUploadResp resp = new MultipartUploadResp();
            resp.setPartNumber(partInfo.getPartNumber());
            resp.setPartETag(partInfo.getPartETag());
            resp.setPartSize(partInfo.getPartSize());
            resp.setSuccess(PART_STATUS_SUCCESS.equals(partInfo.getStatus()));
            return resp;
        }).collect(Collectors.toList());
        if (parts.isEmpty()) {
            throw new BaseException("没有找到任何分片信息");
        }
        // Fail fast when parts are non-contiguous or any part failed
        validatePartsCompleteness(parts);
        // Local storage merges parts on disk; remote part verification only applies to S3
        boolean needVerify = true;
        StorageHandler storageHandler = storageHandlerFactory.createHandler(storageDO.getType());
        if (storageHandler instanceof LocalStorageHandler) {
            needVerify = false;
        }
        storageHandler.completeMultipartUpload(storageDO, parts, initResp.getPath(), uploadId, needVerify);
        // Persist the file record
        FileDO file = new FileDO();
        file.setName(initResp.getFileName().replaceFirst("^[/\\\\]+", ""));
        file.setOriginalName(initResp.getFileName().replaceFirst("^[/\\\\]+", ""));
        file.setPath(initResp.getPath());
        file.setParentPath(initResp.getParentPath());
        file.setSize(initResp.getFileSize());
        // NOTE(review): this stores the file's MD5 in the sha256 field — confirm intended
        file.setSha256(initResp.getFileMd5());
        file.setExtension(initResp.getExtension());
        file.setContentType(initResp.getContentType());
        file.setType(FileTypeEnum.getByExtension(FileUtil.extName(initResp.getFileName())));
        file.setStorageId(storageDO.getId());
        fileService.save(file);
        // Drop the cached session now that the upload is finished
        multipartUploadDao.deleteMultipartUpload(uploadId);
        return file;
    }

    @Override
    public void cancelMultipartUpload(String uploadId) {
        StorageDO storageDO = storageService.getByCode(null);
        // Remove all cached session data, then abort the remote upload
        multipartUploadDao.deleteMultipartUploadAll(uploadId);
        StorageHandler storageHandler = storageHandlerFactory.createHandler(storageDO.getType());
        storageHandler.cleanPart(storageDO, uploadId);
    }

    /**
     * Validate that part numbers form a contiguous 1..n sequence and all parts succeeded.
     *
     * @param parts recorded part results
     */
    private void validatePartsCompleteness(List<MultipartUploadResp> parts) {
        if (parts.isEmpty()) {
            throw new BaseException("没有找到任何分片信息");
        }
        // Part numbers must be exactly 1..n
        List<Integer> partNumbers = parts.stream().map(MultipartUploadResp::getPartNumber).sorted().toList();
        for (int i = 0; i < partNumbers.size(); i++) {
            if (partNumbers.get(i) != i + 1) {
                throw new BaseException("分片编号不连续,缺失分片: " + (i + 1));
            }
        }
        // Every part must have succeeded
        List<Integer> failedParts = parts.stream()
            .filter(part -> !part.isSuccess())
            .map(MultipartUploadResp::getPartNumber)
            .toList();
        if (!failedParts.isEmpty()) {
            throw new BaseException("存在失败的分片: " + failedParts);
        }
    }
}