PeerTube/server/lib/object-storage/shared/object-storage-helpers.ts

import { close, createReadStream, createWriteStream, ensureDir, open, ReadStream, stat } from 'fs-extra'
import { min } from 'lodash'
import { dirname } from 'path'
import { Readable } from 'stream'
import {
CompletedPart,
CompleteMultipartUploadCommand,
CreateMultipartUploadCommand,
DeleteObjectCommand,
GetObjectCommand,
ListObjectsV2Command,
PutObjectCommand,
UploadPartCommand
} from '@aws-sdk/client-s3'
import { pipelinePromise } from '@server/helpers/core-utils'
import { isArray } from '@server/helpers/custom-validators/misc'
import { logger } from '@server/helpers/logger'
import { CONFIG } from '@server/initializers/config'
import { getPrivateUrl } from '../urls'
import { getClient } from './client'
import { lTags } from './logger'
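
// Bucket name and optional key prefix that identify where objects are stored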
type BucketInfo = {
BUCKET_NAME: string
PREFIX?: string
}
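
/**
 * Store a local file in object storage.
 * Files larger than the configured maximum part size are sent as a multipart upload.
 * Returns the private URL of the stored object.
 */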
async function storeObject (options: {
inputPath: string
objectStorageKey: string
bucketInfo: BucketInfo
}): Promise<string> {
const { inputPath, objectStorageKey, bucketInfo } = options
logger.debug('Uploading file %s to %s%s in bucket %s', inputPath, bucketInfo.PREFIX, objectStorageKey, bucketInfo.BUCKET_NAME, lTags())
const stats = await stat(inputPath)
// If the file is bigger than the maximum allowed part size, do a multipart upload
if (stats.size > CONFIG.OBJECT_STORAGE.MAX_UPLOAD_PART) {
return multiPartUpload({ inputPath, objectStorageKey, bucketInfo })
}
const fileStream = createReadStream(inputPath)
return objectStoragePut({ objectStorageKey, content: fileStream, bucketInfo })
}
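
// Delete a single object, addressed by filename (the bucket prefix is prepended)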
async function removeObject (filename: string, bucketInfo: BucketInfo) {
const command = new DeleteObjectCommand({
Bucket: bucketInfo.BUCKET_NAME,
Key: buildKey(filename, bucketInfo)
})
return getClient().send(command)
}
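
// Delete every object whose key starts with the given prefix (under the bucket prefix)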
async function removePrefix (prefix: string, bucketInfo: BucketInfo) {
const s3Client = getClient()
const commandPrefix = bucketInfo.PREFIX + prefix
const listCommand = new ListObjectsV2Command({
Bucket: bucketInfo.BUCKET_NAME,
Prefix: commandPrefix
})
const listedObjects = await s3Client.send(listCommand)
// FIXME: use bulk delete when s3ninja will support this operation
// const deleteParams = {
// Bucket: bucketInfo.BUCKET_NAME,
// Delete: { Objects: [] }
// }
if (isArray(listedObjects.Contents) !== true) {
const message = `Cannot remove ${commandPrefix} prefix in bucket ${bucketInfo.BUCKET_NAME}: no files listed.`
logger.error(message, { response: listedObjects, ...lTags() })
throw new Error(message)
}
for (const object of listedObjects.Contents) {
const command = new DeleteObjectCommand({
Bucket: bucketInfo.BUCKET_NAME,
Key: object.Key
})
await s3Client.send(command)
// FIXME: use bulk delete when s3ninja will support this operation
// deleteParams.Delete.Objects.push({ Key: object.Key })
}
// FIXME: use bulk delete when s3ninja will support this operation
// const deleteCommand = new DeleteObjectsCommand(deleteParams)
// await s3Client.send(deleteCommand)
// Recurse if not all objects could be listed at once (ListObjectsV2 returns at most 1000 keys per request)
if (listedObjects.IsTruncated) await removePrefix(prefix, bucketInfo)
}
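
// Download an object to a local file so it can be processed on disk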
async function makeAvailable (options: {
key: string
destination: string
bucketInfo: BucketInfo
}) {
const { key, destination, bucketInfo } = options
await ensureDir(dirname(destination))
const command = new GetObjectCommand({
Bucket: bucketInfo.BUCKET_NAME,
Key: buildKey(key, bucketInfo)
})
const response = await getClient().send(command)
const file = createWriteStream(destination)
await pipelinePromise(response.Body as Readable, file)
file.close()
}
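
// Prepend the bucket prefix to an object storage key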
function buildKey (key: string, bucketInfo: BucketInfo) {
return bucketInfo.PREFIX + key
}
// ---------------------------------------------------------------------------
export {
BucketInfo,
buildKey,
storeObject,
removeObject,
removePrefix,
makeAvailable
}
// ---------------------------------------------------------------------------
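
// Upload a stream in a single PutObject request (used for small files) and return the private URL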
async function objectStoragePut (options: {
objectStorageKey: string
content: ReadStream
bucketInfo: BucketInfo
}) {
const { objectStorageKey, content, bucketInfo } = options
const command = new PutObjectCommand({
Bucket: bucketInfo.BUCKET_NAME,
Key: buildKey(objectStorageKey, bucketInfo),
Body: content
})
await getClient().send(command)
return getPrivateUrl(bucketInfo, objectStorageKey)
}
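
// Upload a large file in parts of CONFIG.OBJECT_STORAGE.MAX_UPLOAD_PART bytes and return the private URL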
async function multiPartUpload (options: {
inputPath: string
objectStorageKey: string
bucketInfo: BucketInfo
}) {
const { objectStorageKey, inputPath, bucketInfo } = options
const key = buildKey(objectStorageKey, bucketInfo)
const s3Client = getClient()
const statResult = await stat(inputPath)
const createMultipartCommand = new CreateMultipartUploadCommand({
Bucket: bucketInfo.BUCKET_NAME,
Key: key
})
const createResponse = await s3Client.send(createMultipartCommand)
const fd = await open(inputPath, 'r')
let partNumber = 1
const parts: CompletedPart[] = []
const partSize = CONFIG.OBJECT_STORAGE.MAX_UPLOAD_PART
for (let start = 0; start < statResult.size; start += partSize) {
logger.debug(
'Uploading part %d of file to %s%s in bucket %s',
partNumber, bucketInfo.PREFIX, objectStorageKey, bucketInfo.BUCKET_NAME, lTags()
)
// FIXME: Remove when https://github.com/aws/aws-sdk-js-v3/pull/2637 is released
// The s3 sdk needs to know the length of the http body beforehand, but doesn't support
// streams with start and end set, so it just tries to stat the file in stream.path.
// This fails for us because we only want to send part of the file. The stream type
// is modified so we can set the byteLength here, which s3 detects because array buffers
// have this field set
const stream: ReadStream & { byteLength: number } =
createReadStream(
inputPath,
{ fd, autoClose: false, start, end: (start + partSize) - 1 }
) as ReadStream & { byteLength: number }
// If fewer bytes than partSize remain, the last part is smaller: use the remaining byte count for byteLength
stream.byteLength = min([ statResult.size - start, partSize ])
const uploadPartCommand = new UploadPartCommand({
Bucket: bucketInfo.BUCKET_NAME,
Key: key,
UploadId: createResponse.UploadId,
PartNumber: partNumber,
Body: stream
})
const uploadResponse = await s3Client.send(uploadPartCommand)
parts.push({ ETag: uploadResponse.ETag, PartNumber: partNumber })
partNumber += 1
}
await close(fd)
const completeUploadCommand = new CompleteMultipartUploadCommand({
Bucket: bucketInfo.BUCKET_NAME,
// Must be the same (prefixed) key that was given to CreateMultipartUploadCommand
Key: key,
UploadId: createResponse.UploadId,
MultipartUpload: { Parts: parts }
})
await s3Client.send(completeUploadCommand)
logger.debug(
'Completed %s%s in bucket %s in %d parts',
bucketInfo.PREFIX, objectStorageKey, bucketInfo.BUCKET_NAME, partNumber - 1, lTags()
)
return getPrivateUrl(bucketInfo, objectStorageKey)
}