文章目錄
- 前言
- 一、申請(qǐng)阿里云oss
- 二、使用步驟
- 總結(jié)
前言
阿里云對(duì)象存儲(chǔ)OSS(Object Storage Service)是一款海量、安全、低成本、高可靠的云存儲(chǔ)服務(wù),可提供99.9999999999%(12個(gè)9)的數(shù)據(jù)持久性,99.995%的數(shù)據(jù)可用性。多種存儲(chǔ)類型供選擇,全面優(yōu)化存儲(chǔ)成本。
您可以使用阿里云提供的API、SDK接口或者OSS遷移工具輕松地將海量數(shù)據(jù)移入或移出阿里云OSS。數(shù)據(jù)存儲(chǔ)到阿里云OSS以后,您可以選擇標(biāo)準(zhǔn)存儲(chǔ)(Standard)作為移動(dòng)應(yīng)用、大型網(wǎng)站、圖片分享或熱點(diǎn)音視頻的主要存儲(chǔ)方式,也可以選擇成本更低、存儲(chǔ)期限更長(zhǎng)的低頻訪問存儲(chǔ)(Infrequent Access)、歸檔存儲(chǔ)(Archive)、冷歸檔存儲(chǔ)(Cold Archive)或者深度冷歸檔(Deep Cold Archive)作為不經(jīng)常訪問數(shù)據(jù)的存儲(chǔ)方式。
一、申請(qǐng)阿里云oss
阿里云-計(jì)算,為了無法計(jì)算的價(jià)值
選擇對(duì)象存儲(chǔ)oss
選擇立即開通
創(chuàng)建bucket(桶)
創(chuàng)建Bucket 名稱
選擇就近地域
其他的默認(rèn)就可
獲取AccessKeyID 和 AccessKeySecret
?文章來源:http://www.zghlxwxcb.cn/news/detail-732138.html
二、使用步驟
1.引入oss依賴
<!-- oss -->
<dependency>
<groupId>com.aliyun.oss</groupId>
<artifactId>aliyun-sdk-oss</artifactId>
<version>3.8.0</version>
</dependency>
2.阿里云配置
在resources下新建 aliyunOSS.properties 文件。文章來源地址http://www.zghlxwxcb.cn/news/detail-732138.html
#之前獲取的AccessKeyID
aliyun.AccessKeyID=
#之前獲取的AccessKeySecret
aliyun.AccessKeySecret=
#你創(chuàng)建桶的名稱
aliyun.Buckets=
#桶的外網(wǎng)地址 在桶的概覽下的外網(wǎng)地址 例:oss-cn-hangzhou.aliyuncs.com
aliyun.EndPoint=
#自定義前綴
aliyun.prefix=
#限制單個(gè)上傳的最大的MB
aliyun.MAX_SIZE =
3.oss工具類
/**
* @Description
* @Author LuoAC
* @Date 2023/6/19 16:38
*/
public class test {
/**
* 阿里云的配置參數(shù)
*/
private static String accessKeyId = null;
private static String accessKeySecret = null;
private static String endpoint = null;
private static String bucketName = null;
private static Integer MAX_SIZE = null;
/**
* 存儲(chǔ)在OSS中的前綴名
*/
private static String file_prefix = null;
private static volatile OSSClient ossClient = null;
private static volatile RedisCache redisManager = null;
static {
//初始化AccessKey
accessKeyId = PropertiesReader.get("aliyun.AccessKeyID");
//初始化AccessKeySecret
accessKeySecret = PropertiesReader.get("aliyun.AccessKeySecret");
//初始化Endpoint
endpoint = PropertiesReader.get("aliyun.EndPoint");
//初始化bucketName
bucketName = PropertiesReader.get("aliyun.Buckets");
//初始化前綴
file_prefix = PropertiesReader.get("aliyun.prefix");
//文件最大值
MAX_SIZE = PropertiesReader.getInteger("aliyun.MAX_SIZE");
}
/**
* @return skipUpload 判斷文件是否存在的標(biāo)識(shí)。true即上傳完成。再次上傳可秒傳
* @return key 文件key
* @return bucket 桶名稱
* @return uploadId 上傳id
* @return uploaded 已經(jīng)上傳分片的list<Integer>
* @Description 首次分片 信息
* @Author LuoAC
* @Date 2023/6/9 9:55
*/
public static Map ossCheck(UploadChunkFileParam param) {
RedisCache redisManager = initRedisManager();
//文件md5
String identifier = param.getIdentifier();
//文件后綴
String suffix = param.getSuffix();
//文件的key
String key = identifier + "." + suffix;
Map<String, Object> map = MapUtil.newHashMap();
// 判斷是否上傳過 秒傳
if (checkExist(key)) {
map.put("skipUpload", true);
map.put("key", key);
map.put("bucket", bucketName);
return map;
}
// 判斷是否第一次上傳 是否做斷點(diǎn)續(xù)傳
String uploadId = redisManager.getCacheObject(key);
//第一次上傳
if (StringUtils.isEmpty(uploadId)) {
String uploadIdNew = uploadChunkInit(key);
map.put("skipUpload", false);
map.put("uploadId", uploadIdNew);
map.put("uploaded", null);
redisManager.setCacheObject(key, uploadIdNew);
return map;
} else {
// 繼續(xù)上傳
Map<String, String> uploadedCache = redisManager.hmget(CacheConstants.REDIS_ALI_OSS_KEY + uploadId);
List<Integer> uploaded = Lists.newArrayList();
for (Map.Entry<String, String> entry : uploadedCache.entrySet()) {
uploaded.add(JSONUtil.toBean(entry.getValue(), PartETag.class).getPartNumber());
}
map.put("skipUpload", false);
map.put("uploadId", uploadId);
map.put("uploaded", uploaded);
return map;
}
}
/**
* 分片上傳
*
* @param param 上傳參數(shù)
* @return
*/
public static Map uploadChunk(UploadChunkFileParam param) {
return uploadChunk(param.getUploadId(), param.getIdentifier(), param.getSuffix(), param.getFile(), param.getChunkNumber(),
param.getCurrentChunkSize(), param.getTotalChunks(), param.getFilename(),param.getProjectId(),param.getDirId());
}
/**
* 分片上傳
* 1、檢查文件是否上傳
* 2、檢查文件是否第一次上傳,第一次上傳創(chuàng)建上傳id uploadId
* 3、檢查是否是斷點(diǎn)續(xù)傳,如果是返回已上傳的分片
* 4、分片上傳到阿里云OSS上,并記錄上傳信息到Redis
* 5、判斷是否已上傳完成,已完成:合并所有分片為源文件
*
* @param uploadId 上傳id
* @param identifier 文件在OSS上的key
* @param file 文件分片
* @param chunkIndex 分片索引
* @param chunkSize 分片大小
* @param chunkCount 總分片數(shù)w
* @return
*/
public static Map uploadChunk(String uploadId, String identifier, String suffix, MultipartFile file, Integer chunkIndex,
long chunkSize, Integer chunkCount, String fileName, Integer projectId, String dirId) {
ossClient = initOSS();
String key=identifier+"."+suffix;
try {
Map<String, Object> map = MapUtil.newHashMap();
RedisCache redisManager = initRedisManager();
// 上傳分片
PartETag partETag = uploadChunkPart(uploadId, key, file.getInputStream(), chunkIndex, chunkSize, chunkCount);
// 分片上傳完成緩存key
redisManager.hset(CacheConstants.REDIS_ALI_OSS_KEY + uploadId, chunkIndex + ",", JSONUtil.toJsonStr(partETag));
// 取出所有已上傳的分片信息
Map<String, String> dataMap = redisManager.hmget(CacheConstants.REDIS_ALI_OSS_KEY + uploadId);
List<PartETag> partETagList = Lists.newArrayList();
//已經(jīng)上傳的片數(shù)
Integer i = 0;
for (Map.Entry<String, String> entry : dataMap.entrySet()) {
partETagList.add(JSONUtil.toBean(entry.getValue(), PartETag.class));
i++;
}
List<Integer> list = partETagList.stream().map(PartETag::getPartNumber).collect(Collectors.toList());
// 已上傳的百分比
String percent = String.format("%.2f", (double) i / chunkCount);
// 分片上緩存的待傳百分比
UploadChunkFileParam uploadChunkFileParam = new UploadChunkFileParam();
uploadChunkFileParam.setPercent(percent);
uploadChunkFileParam.setFilename(fileName);
uploadChunkFileParam.setProjectId(projectId);
uploadChunkFileParam.setDirId(dirId);
uploadChunkFileParam.setUploaded(list);
uploadChunkFileParam.setIdentifier(identifier);
uploadChunkFileParam.setSuffix(suffix);
uploadChunkFileParam.setUploadId(uploadId);
redisManager.setCacheMapValue(CacheConstants.REDIS_ALI_OSS_KEY + SecurityUtils.getUserId(),uploadId+","+key,uploadChunkFileParam);
// 判斷是否上傳完成
if (dataMap.keySet().size() == chunkCount) {
uploadChunkComplete(uploadId, key, partETagList);
for (String mapKey : dataMap.keySet()) {
redisManager.hdel(CacheConstants.REDIS_ALI_OSS_KEY + uploadId, mapKey);
}
redisManager.deleteObject(key);
redisManager.hdel(CacheConstants.REDIS_ALI_OSS_KEY+SecurityUtils.getUserId(),uploadId+","+key);
map.put("skipUpload", true);
map.put("uploadId", uploadId);
map.put("key", key);
map.put("bucket", bucketName);
map.put("fileName",fileName);
ossClient.setObjectAcl(bucketName, key, CannedAccessControlList.PublicRead);
} else {
map.put("uploaded", list);
map.put("skipUpload", false);
map.put("uploadId", uploadId);
}
return map;
} catch (Exception e) {
e.printStackTrace();
throw new CustomException("上傳失?。? + e.getMessage());
}
}
/**
* 上傳分片文件
*
* @param uploadId 上傳id
* @param key key
* @param instream 文件分片流
* @param chunkIndex 分片索引
* @param chunkSize 分片大小
* @return
*/
public static PartETag uploadChunkPart(String uploadId, String key, InputStream instream,
Integer chunkIndex, long chunkSize, Integer chunkCount) {
ossClient = initOSS();
try {
UploadPartRequest partRequest = new UploadPartRequest();
// 阿里云 oss 文件根目錄
partRequest.setBucketName(bucketName);
// 文件key
partRequest.setKey(key);
// 分片上傳uploadId
partRequest.setUploadId(uploadId);
// 分片文件
partRequest.setInputStream(instream);
// 分片大小。除了最后一個(gè)分片沒有大小限制,其他的分片最小為100 KB。
partRequest.setPartSize(chunkSize);
System.out.println(chunkSize + " " + chunkIndex + " " + uploadId);
// 分片號(hào)。每一個(gè)上傳的分片都有一個(gè)分片號(hào),取值范圍是1~10000,如果超出這個(gè)范圍,OSS將返回InvalidArgument的錯(cuò)誤碼。
partRequest.setPartNumber(chunkIndex);
// 每個(gè)分片不需要按順序上傳,甚至可以在不同客戶端上傳,OSS會(huì)按照分片號(hào)排序組成完整的文件。
UploadPartResult uploadPartResult = ossClient.uploadPart(partRequest);
// 每次上傳分片之后,OSS的返回結(jié)果包含PartETag。PartETag將被保存在redis中。
return uploadPartResult.getPartETag();
} catch (Exception e) {
e.printStackTrace();
throw new CustomException("分片上傳失?。? + e.getMessage());
}
}
/**
* 文件合并
*
* @param uploadId 上傳id
* @param key key
* @param chunkTags 分片上傳信息
* @return
*/
public static CompleteMultipartUploadResult uploadChunkComplete(String uploadId, String key, List<PartETag> chunkTags) {
ossClient = initOSS();
try {
CompleteMultipartUploadRequest completeMultipartUploadRequest =
new CompleteMultipartUploadRequest(bucketName, key, uploadId, chunkTags);
CompleteMultipartUploadResult result = ossClient.completeMultipartUpload(completeMultipartUploadRequest);
return result;
} catch (Exception e) {
e.printStackTrace();
throw new CustomException("分片合并失?。? + e.getMessage());
}
}
/**
* @Description 初始化OSSClient
* @Author LuoAC
* @Date 2023/6/8 10:53
*/
private static OSSClient initOSS() {
if (ossClient == null) {
synchronized (OSSClient.class) {
if (ossClient == null) {
ossClient = new OSSClient(endpoint, new DefaultCredentialProvider(accessKeyId, accessKeySecret),
new ClientConfiguration());
}
}
}
return ossClient;
}
/**
* 初始化上傳id uploadId
*
* @param key
* @return
*/
public static String uploadChunkInit(String key) {
if (StringUtils.isEmpty(key)) {
throw new CustomException("key不能為空");
}
ossClient = initOSS();
try {
// 創(chuàng)建分片上傳對(duì)象
InitiateMultipartUploadRequest uploadRequest = new InitiateMultipartUploadRequest(bucketName, key);
// 初始化分片
InitiateMultipartUploadResult result = ossClient.initiateMultipartUpload(uploadRequest);
// 返回uploadId,它是分片上傳事件的唯一標(biāo)識(shí),您可以根據(jù)這個(gè)uploadId發(fā)起相關(guān)的操作,如取消分片上傳、查詢分片上傳等。
return result.getUploadId();
} catch (Exception e) {
e.printStackTrace();
throw new CustomException("初始化分片失?。? + e.getMessage());
}
}
/**
* @Description 判斷桶中是否存在這個(gè)key
* @Author LuoAC
* @Date 2023/6/8 14:00
*/
public static Boolean checkExist(String key) {
ossClient = initOSS();
return ossClient.doesObjectExist(bucketName, key);
}
/**
* @Description 獲取redis
* @Author LuoAC
* @Date 2023/6/8 14:11
*/
private static RedisCache initRedisManager() {
if (redisManager == null) {
synchronized (RedisCache.class) {
if (redisManager == null) {
return SpringUtils.getBean(RedisCache.class);
}
}
}
return redisManager;
}
}
package com.hzzd.web.services.domain;
import com.baomidou.mybatisplus.annotation.TableField;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import org.springframework.web.multipart.MultipartFile;
import java.util.List;
/**
 * @Description Parameter object for chunked (multipart) file upload to OSS.
 * @Author LuoAC
 * @Date 2023/6/7 17:18
 */
@Data
public class UploadChunkFileParam {
    /**
     * File transfer task ID: the MD5 digest of the file.
     */
    private String identifier;
    /**
     * Full file name, e.g. 123.png
     */
    private String filename;
    /** File extension */
    private String suffix;
    /**
     * Subject type -- business field specific to the author's project; can be ignored
     */
    private String objectType;
    /**
     * Total number of chunks
     */
    private int totalChunks;
    /**
     * Size of each chunk
     */
    private long chunkSize;
    /**
     * Index of the current chunk
     */
    private int chunkNumber;
    /**
     * Size of the current chunk
     */
    private long currentChunkSize;
    /**
     * The chunk payload being transferred
     */
    private MultipartFile file;
    /**
     * OSS multipart upload id
     */
    private String uploadId;
    /**
     * Object key of the file in OSS
     */
    private String key;
    /**
     * Upload progress as a two-decimal fraction string
     * (original comment wrongly duplicated the "file key" description)
     */
    private String percent;
    @ApiModelProperty("工程id")
    private Integer projectId;
    @ApiModelProperty("目錄id")
    private String dirId;
    /**
     * Part numbers already uploaded
     * (original comment said "not yet uploaded", but uploadChunk stores the
     * completed part-number list here)
     */
    private List<Integer> uploaded;
}
oss 分片上傳筆記
到了這里,關(guān)于spring boot 阿里云oss 文件分片上傳、斷點(diǎn)續(xù)傳的文章就介紹完了。如果您還想了解更多內(nèi)容,請(qǐng)?jiān)谟疑辖撬阉鱐OY模板網(wǎng)以前的文章或繼續(xù)瀏覽下面的相關(guān)文章,希望大家以后多多支持TOY模板網(wǎng)!