feat(s3): Add support for S3 object storage classes

Introduces a new 'storage_class' configuration option for S3 providers. Users can now specify the desired storage class (e.g., Standard, GLACIER, DEEP_ARCHIVE) for objects uploaded to S3-compatible services like AWS S3 and Tencent COS.

The input storage class string is normalized to match AWS SDK constants, supporting various common aliases. If an unknown storage class is provided, it will be used as a raw value with a warning. This enhancement provides greater control over storage costs and data access patterns.
This commit is contained in:
okatu-loli 2025-10-15 15:43:19 +08:00
parent 4f8bc478d5
commit e673ae069b
3 changed files with 35 additions and 0 deletions

View File

@ -15,6 +15,7 @@ import (
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/server/common"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
@ -32,6 +33,33 @@ type S3 struct {
cron *cron.Cron
}
// storageClassLookup maps normalized storage class aliases (lowercased,
// with dashes folded to underscores by resolveStorageClass) to the
// canonical values expected by the S3 API.
var storageClassLookup = map[string]string{
	"standard":            s3.ObjectStorageClassStandard,
	"reduced_redundancy":  s3.ObjectStorageClassReducedRedundancy,
	"glacier":             s3.ObjectStorageClassGlacier,
	"standard_ia":         s3.ObjectStorageClassStandardIa,
	"onezone_ia":          s3.ObjectStorageClassOnezoneIa,
	"intelligent_tiering": s3.ObjectStorageClassIntelligentTiering,
	"deep_archive":        s3.ObjectStorageClassDeepArchive,
	"outposts":            s3.ObjectStorageClassOutposts,
	"glacier_ir":          s3.ObjectStorageClassGlacierIr,
	"snow":                s3.ObjectStorageClassSnow,
	"express_onezone":     s3.ObjectStorageClassExpressOnezone,
	// Tencent COS archive tier: offered in the driver's option list but
	// has no aws-sdk-go constant, so spell the API value directly. Without
	// this entry the raw lowercase "archive" would be sent (with a
	// spurious unknown-class warning).
	"archive": "ARCHIVE",
}
// resolveStorageClass translates the configured storage class string into
// the canonical S3 value. It returns nil when no storage class is set, so
// callers can leave the request field unset. Unrecognized values are passed
// through verbatim after logging a warning.
func (d *S3) resolveStorageClass() *string {
	raw := strings.TrimSpace(d.StorageClass)
	if raw == "" {
		return nil
	}
	// Accept common alias spellings: case-insensitive, dash or underscore.
	key := strings.ReplaceAll(strings.ToLower(raw), "-", "_")
	if canonical, found := storageClassLookup[key]; found {
		return aws.String(canonical)
	}
	log.Warnf("s3: unknown storage class %q, using raw value", d.StorageClass)
	return aws.String(raw)
}
// Config returns the driver configuration associated with this S3 instance.
func (d *S3) Config() driver.Config {
	return d.config
}
@ -179,6 +207,9 @@ func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up
}),
ContentType: &contentType,
}
if storageClass := d.resolveStorageClass(); storageClass != nil {
input.StorageClass = storageClass
}
_, err := uploader.UploadWithContext(ctx, input)
return err
}

View File

@ -21,6 +21,7 @@ type Addition struct {
ListObjectVersion string `json:"list_object_version" type:"select" options:"v1,v2" default:"v1"`
RemoveBucket bool `json:"remove_bucket" help:"Remove bucket name from path when using custom host."`
AddFilenameToDisposition bool `json:"add_filename_to_disposition" help:"Add filename to Content-Disposition header."`
StorageClass string `json:"storage_class" type:"select" options:",standard,standard_ia,onezone_ia,intelligent_tiering,glacier,glacier_ir,deep_archive,archive" help:"Storage class for new objects. AWS and Tencent COS support different subsets (COS uses ARCHIVE/DEEP_ARCHIVE)."`
}
func init() {

View File

@ -202,6 +202,9 @@ func (d *S3) copyFile(ctx context.Context, src string, dst string) error {
CopySource: aws.String(url.PathEscape(d.Bucket + "/" + srcKey)),
Key: &dstKey,
}
if storageClass := d.resolveStorageClass(); storageClass != nil {
input.StorageClass = storageClass
}
_, err := d.client.CopyObject(input)
return err
}