
A Complete Guide to Resumable Large-File Uploads on the Frontend: Principles, Security Strategies, and Hands-On Code

1. Core Principles of Resumable Upload

1.1 Technical Architecture Design
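
At a high level, the architecture has three cooperating parts: the browser splits the file into hashed chunks and uploads them with bounded concurrency; a Node.js server validates each chunk, stores it, and merges the pieces on request; and upload progress is persisted on the client so an interrupted transfer can pick up where it left off.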

1.2 Core Flow
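
The end-to-end flow, which the rest of this article implements step by step:

1. Split the file into fixed-size chunks and compute a hash for each chunk (plus one for the whole file).
2. Ask the server (or local storage) which chunks have already been received.
3. Upload the missing chunks in parallel, under a concurrency cap and with per-chunk retries.
4. Persist progress after every successful chunk, so an interruption loses at most the chunks in flight.
5. Once all chunks are acknowledged, request a merge; the server reassembles the file and verifies the whole-file hash.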

2. Core Frontend Implementation

2.1 File Chunking

class FileSplitter {
  constructor(file, chunkSize = 5 * 1024 * 1024) {
    this.file = file
    this.chunkSize = chunkSize
    this.totalChunks = Math.ceil(file.size / chunkSize)
  }

  // Slice out a single chunk by index and compute its hash
  async getChunk(index) {
    const start = index * this.chunkSize
    const end = Math.min(start + this.chunkSize, this.file.size)
    const chunk = this.file.slice(start, end)
    return {
      chunk,
      index,
      total: this.totalChunks,
      hash: await this.calculateHash(chunk)
    }
  }

  // Iterate over all chunks in order
  async* getChunks() {
    for (let i = 0; i < this.totalChunks; i++) {
      yield await this.getChunk(i)
    }
  }

  // SHA-256 hex digest of a Blob via the Web Crypto API
  async calculateHash(blob) {
    const buffer = await blob.arrayBuffer()
    const hashBuffer = await crypto.subtle.digest('SHA-256', buffer)
    return Array.from(new Uint8Array(hashBuffer))
      .map(b => b.toString(16).padStart(2, '0'))
      .join('')
  }
}
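
A quick usage sketch, assuming a plain file input on the page (the input element and logging are illustrative):

const input = document.querySelector('input[type="file"]')
input.addEventListener('change', async () => {
  const splitter = new FileSplitter(input.files[0])
  // Iterate lazily; each chunk is sliced and hashed on demand
  for await (const { index, total, hash } of splitter.getChunks()) {
    console.log(`chunk ${index + 1}/${total}: ${hash.slice(0, 8)}...`)
  }
})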
2.2 Upload Controller

// Upload state tracked per chunk
interface UploadChunk {
  index: number
  status: 'pending' | 'uploaded' | 'failed'
  retries: number
}

class UploadController {
  private file: File
  private splitter: FileSplitter
  private fileHash = ''
  private chunks: UploadChunk[] = []
  private concurrentLimit = 3
  private retryLimit = 3
  private uploadedChunks = new Set<number>()
  private progressCallbacks: Array<(done: number, total: number) => void> = []

  constructor(file: File) {
    this.file = file
    this.splitter = new FileSplitter(file)
    this.initChunks()
  }

  onProgress(cb: (done: number, total: number) => void) {
    this.progressCallbacks.push(cb)
  }

  private initChunks() {
    this.chunks = Array.from({ length: this.splitter.totalChunks }, (_, i): UploadChunk => ({
      index: i,
      status: 'pending',
      retries: 0
    }))
  }

  async start() {
    // Hash the whole file once; it identifies the upload session on the
    // server and keys the saved progress (see UploadRecovery in 5.1)
    this.fileHash = await this.splitter.calculateHash(this.file)
    this.loadProgress()

    const queue = new AsyncQueue(this.concurrentLimit)
    for (const chunk of this.chunks) {
      if (!this.uploadedChunks.has(chunk.index)) {
        queue.add(() => this.uploadChunk(chunk))
      }
    }
    await queue.complete()
    await this.mergeFile()
    UploadRecovery.clearProgress(this.fileHash)
  }

  private async uploadChunk(chunk: UploadChunk) {
    try {
      const chunkData = await this.splitter.getChunk(chunk.index)

      const formData = new FormData()
      formData.append('file', chunkData.chunk, this.file.name)
      formData.append('index', chunk.index.toString())
      formData.append('total', chunkData.total.toString())
      formData.append('hash', chunkData.hash)     // per-chunk integrity hash
      formData.append('fileHash', this.fileHash)  // whole-file hash, shared by all chunks

      const response = await fetch('/api/upload', {
        method: 'POST',
        body: formData
      })

      if (!response.ok) throw new Error('Upload failed')

      chunk.status = 'uploaded'
      this.uploadedChunks.add(chunk.index)
      this.saveProgress()
      this.emitProgress()
    } catch (error) {
      if (chunk.retries < this.retryLimit) {
        chunk.retries++
        return this.uploadChunk(chunk)
      }
      chunk.status = 'failed'
      throw error
    }
  }

  private async mergeFile() {
    await fetch('/api/merge', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        fileName: this.file.name,
        total: this.splitter.totalChunks,
        hash: this.fileHash
      })
    })
  }

  private saveProgress() {
    UploadRecovery.saveProgress(this.fileHash, Array.from(this.uploadedChunks))
  }

  private loadProgress() {
    this.uploadedChunks = new Set(UploadRecovery.loadProgress(this.fileHash))
  }

  private emitProgress() {
    for (const cb of this.progressCallbacks) {
      cb(this.uploadedChunks.size, this.chunks.length)
    }
  }
}
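
Wiring it up is then a matter of constructing the controller and subscribing to progress, for example:

const controller = new UploadController(input.files[0])
controller.onProgress((done, total) => {
  console.log(`uploaded ${done}/${total} chunks`)
})
await controller.start()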
3. Key Server-Side Implementation (Node.js)

3.1 Chunk Receiving Endpoint

const express = require('express')
const path = require('path')
const crypto = require('crypto')
const fs = require('fs-extra')
const multer = require('multer')
const upload = multer({ dest: 'temp/' })

const app = express()
app.use(express.json())
const activeUploads = new Map()

// SHA-256 hex digest of a file on disk, streamed to keep memory flat
const calculateHash = (filePath) =>
  new Promise((resolve, reject) => {
    const hash = crypto.createHash('sha256')
    fs.createReadStream(filePath)
      .on('data', (data) => hash.update(data))
      .on('end', () => resolve(hash.digest('hex')))
      .on('error', reject)
  })

app.post('/api/upload', upload.single('file'), async (req, res) => {
  const { index, total, hash, fileHash } = req.body
  // Key the session by file name plus whole-file hash, so every chunk
  // of the same file lands in the same entry
  const fileKey = `${req.file.originalname}-${fileHash}`

  // Verify the chunk hash before accepting the chunk
  const chunkHash = await calculateHash(req.file.path)
  if (chunkHash !== hash) {
    await fs.remove(req.file.path)
    return res.status(400).send('Invalid chunk hash')
  }

  // Move the chunk to a deterministic path so the merge step can find it
  await fs.move(req.file.path, path.join('temp', `${fileKey}-${index}`), { overwrite: true })

  // Record which chunks have arrived
  if (!activeUploads.has(fileKey)) {
    activeUploads.set(fileKey, {
      totalChunks: parseInt(total, 10),
      receivedChunks: new Set()
    })
  }

  const uploadInfo = activeUploads.get(fileKey)
  uploadInfo.receivedChunks.add(parseInt(index, 10))

  // Return the chunks received so far so the client can skip them on resume
  res.json({
    received: Array.from(uploadInfo.receivedChunks)
  })
})
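
The upload response above already reports the received chunks, but a freshly reloaded client has no response to look at. One way to close that gap is a small status route it can query before starting; the route below is a sketch, not part of the original API:

// Hypothetical: let a reloaded client ask which chunks already arrived
app.get('/api/upload/status', (req, res) => {
  const { fileName, fileHash } = req.query
  const uploadInfo = activeUploads.get(`${fileName}-${fileHash}`)
  res.json({
    received: uploadInfo ? Array.from(uploadInfo.receivedChunks) : []
  })
})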
3.2 File Merge Endpoint

app.post('/api/merge', async (req, res) => {
  const { fileName, total, hash } = req.body
  const fileKey = `${fileName}-${hash}`

  const uploadInfo = activeUploads.get(fileKey)
  if (!uploadInfo || uploadInfo.receivedChunks.size < uploadInfo.totalChunks) {
    return res.status(400).send('Not all chunks received')
  }

  // Append the chunks in index order
  const finalPath = path.join('uploads', fileName)
  await fs.ensureDir('uploads')
  const writeStream = fs.createWriteStream(finalPath)

  for (let i = 0; i < uploadInfo.totalChunks; i++) {
    const chunkPath = path.join('temp', `${fileKey}-${i}`)
    const chunkBuffer = await fs.readFile(chunkPath)
    writeStream.write(chunkBuffer)
    await fs.remove(chunkPath)
  }

  // Wait until everything is flushed to disk before hashing
  await new Promise((resolve, reject) => {
    writeStream.on('finish', resolve)
    writeStream.on('error', reject)
    writeStream.end()
  })

  // Verify the assembled file against the whole-file hash
  const finalHash = await calculateHash(finalPath)
  if (finalHash !== hash) {
    await fs.remove(finalPath)
    return res.status(500).send('File verification failed')
  }

  activeUploads.delete(fileKey)
  res.sendStatus(200)
})
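
A note on memory: reading each chunk fully with fs.readFile is fine at the 1-10MB chunk sizes used here; for much larger chunks, piping fs.createReadStream(chunkPath) into the write stream with { end: false } keeps memory usage flat.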
4. Security Enhancements

4.1 End-to-End Encryption and Verification

// Client-side encryption (note: RSA-OAEP can only encrypt a payload smaller
// than the key size, so production code normally encrypts chunks with a
// symmetric cipher and uses RSA only to wrap the symmetric key; see below)
const encryptChunk = async (chunk, publicKey) => {
  const data = await chunk.arrayBuffer() // chunks are binary, not text
  const encrypted = await window.crypto.subtle.encrypt(
    { name: 'RSA-OAEP' },
    publicKey,
    data
  )
  return new Blob([encrypted])
}

// Server-side decryption
const decryptChunk = async (encrypted, privateKey) => {
  const buffer = await encrypted.arrayBuffer()
  return crypto.subtle.decrypt(
    { name: 'RSA-OAEP' },
    privateKey,
    buffer
  )
}
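
Because of the RSA-OAEP payload limit noted above, a more realistic scheme encrypts each chunk with AES-GCM and wraps only the AES key with RSA. A minimal sketch of the client side, with key caching and IV transport deliberately simplified:

const encryptChunkHybrid = async (chunk, rsaPublicKey) => {
  // Fresh AES key (in practice, generate once per file and cache it)
  const aesKey = await crypto.subtle.generateKey(
    { name: 'AES-GCM', length: 256 }, true, ['encrypt', 'decrypt']
  )
  const iv = crypto.getRandomValues(new Uint8Array(12)) // 96-bit nonce
  const ciphertext = await crypto.subtle.encrypt(
    { name: 'AES-GCM', iv },
    aesKey,
    await chunk.arrayBuffer()
  )
  // Wrap the small AES key with RSA so only the server can recover it
  const rawKey = await crypto.subtle.exportKey('raw', aesKey)
  const wrappedKey = await crypto.subtle.encrypt(
    { name: 'RSA-OAEP' }, rsaPublicKey, rawKey
  )
  return { ciphertext: new Blob([ciphertext]), iv, wrappedKey }
}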
4.2 Security Defense Strategies

// Client-side defenses
const validateFile = (file) => {
  const MAX_SIZE = 10 * 1024 * 1024 * 1024 // 10GB
  const ALLOW_TYPES = ['video/mp4', 'image/png']

  if (file.size > MAX_SIZE) throw new Error('File too large')
  if (!ALLOW_TYPES.includes(file.type)) throw new Error('Unsupported file type')
}

// Server-side defenses
const antiVirusScan = async (filePath) => {
  const result = await clamscan.scanFile(filePath)
  if (result.viruses.length > 0) {
    throw new Error('Malicious file detected')
  }
}
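
file.type is supplied by the browser and trivially spoofable, so the server should not trust it. A common complement is to sniff the file's magic bytes; the sketch below checks just two illustrative signatures (real lists are much longer):

const { open } = require('fs/promises')

const SIGNATURES = [
  { type: 'image/png', offset: 0, bytes: [0x89, 0x50, 0x4e, 0x47] }, // \x89PNG
  { type: 'video/mp4', offset: 4, bytes: [0x66, 0x74, 0x79, 0x70] }  // ....ftyp
]

const sniffFileType = async (filePath) => {
  const handle = await open(filePath, 'r')
  try {
    // Read the first 12 bytes and match them against known signatures
    const { buffer } = await handle.read(Buffer.alloc(12), 0, 12, 0)
    const match = SIGNATURES.find(sig =>
      sig.bytes.every((b, i) => buffer[sig.offset + i] === b)
    )
    return match ? match.type : null
  } finally {
    await handle.close()
  }
}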
5. Reliability Guarantees

5.1 Breakpoint Recovery Implementation

class UploadRecovery {
  static STORAGE_KEY = 'upload_progress'

  // Persist the uploaded chunk indexes for one file, keyed by its hash
  static saveProgress(fileHash, chunks) {
    const progress = JSON.parse(localStorage.getItem(this.STORAGE_KEY) || '{}')
    progress[fileHash] = chunks
    localStorage.setItem(this.STORAGE_KEY, JSON.stringify(progress))
  }

  static loadProgress(fileHash) {
    const progress = JSON.parse(localStorage.getItem(this.STORAGE_KEY) || '{}')
    return progress[fileHash] || []
  }

  static clearProgress(fileHash) {
    const progress = JSON.parse(localStorage.getItem(this.STORAGE_KEY) || '{}')
    delete progress[fileHash]
    localStorage.setItem(this.STORAGE_KEY, JSON.stringify(progress))
  }
}
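
Keying saved progress by the whole-file hash rather than the file name means a renamed copy of the same file still resumes correctly, and two different files that happen to share a name cannot clobber each other's progress; localStorage keeps the record across page reloads.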
5.2 Chunk Verification Flow
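
Verification happens at three points in the pipeline above: the client hashes every chunk before sending it (2.1); the server recomputes and compares the hash before accepting a chunk, rejecting mismatches with a 400 (3.1); and after merging, the server hashes the assembled file against the client-supplied whole-file hash before confirming the upload (3.2).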

6. Performance Optimizations

6.1 Adaptive Chunk Sizing Strategy

function calculateChunkSize(fileSize) {
  const MIN_CHUNK = 1 * 1024 * 1024  // 1MB
  const MAX_CHUNK = 10 * 1024 * 1024 // 10MB
  const TARGET_CHUNKS = 100

  // Aim for ~100 chunks, clamped to the [1MB, 10MB] range
  const idealSize = Math.ceil(fileSize / TARGET_CHUNKS)
  return Math.min(MAX_CHUNK, Math.max(MIN_CHUNK, idealSize))
}
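
This plugs directly into the splitter from section 2.1:

const splitter = new FileSplitter(file, calculateChunkSize(file.size))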
6.2 Concurrency Control Optimization

class AsyncQueue {
  constructor(concurrency = 3) {
    this.pending = []
    this.inProgress = 0
    this.concurrency = concurrency
  }

  // Enqueue a task (a function returning a promise); resolves when the task does
  add(task) {
    return new Promise((resolve, reject) => {
      this.pending.push({ task, resolve, reject })
      this.run()
    })
  }

  // Start pending tasks until the concurrency limit is reached
  run() {
    while (this.inProgress < this.concurrency && this.pending.length) {
      const { task, resolve, reject } = this.pending.shift()
      this.inProgress++
      task()
        .then(resolve)
        .catch(reject)
        .finally(() => {
          this.inProgress--
          this.run()
        })
    }
  }

  // Resolve once the queue has fully drained (simple 100ms polling)
  async complete() {
    while (this.pending.length || this.inProgress) {
      await new Promise(resolve => setTimeout(resolve, 100))
    }
  }
}
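
A design note on complete(): polling every 100ms is easy to reason about but adds up to a tenth of a second of latency after the last task finishes; a tighter variant tracks the number of unfinished tasks and resolves a stored promise the moment the count reaches zero.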
7. Complete Testing Plan

7.1 Test Case Design

Test Scenario                 | Verification Goal   | Method
Network interruption recovery | Automatic resume    | Manually disconnect the network
Chunk hash verification       | Data integrity      | Tamper with chunk contents
Concurrency stress test       | Server stability    | Launch 100+ uploads at once
Large file test               | Memory leak check   | Upload a 10GB file

7.2 Automated Test Example

def test_resume_upload():
    # Start the upload
    file = generate_large_file('1GB.bin')
    response = start_upload(file)
    assert response.status_code == 200

    # Interrupt it mid-transfer
    interrupt_network()
    upload_chunk()
    assert_last_progress_saved()

    # Resume and verify the completed file
    restore_network()
    resume_response = resume_upload(file)
    assert_file_complete(resume_response)
Summary: This article walked through a complete frontend resumable-upload implementation from principles to practice, covering the core techniques of file chunking, encrypted transfer, and progress recovery, with production-oriented code for each part.