+
🔒 本次登录需要验证
+
This login requires verification
+
+
下面是验证所需要的数据,具体使用方法请参照对应的驱动文档
+ Below are the relevant verification data. For specific usage methods, please refer to the corresponding driver documentation.
+
+
`, string(reviewDataJSON))
+}
+
// 计算文件Gcid
func getGcid(r io.Reader, size int64) (string, error) {
calcBlockSize := func(j int64) int64 {
@@ -200,3 +260,24 @@ func getGcid(r io.Reader, size int64) (string, error) {
}
return hex.EncodeToString(hash1.Sum(nil)), nil
}
+
+func generateDeviceSign(deviceID, packageName string) string {
+
+ signatureBase := fmt.Sprintf("%s%s%s%s", deviceID, packageName, APPID, APPKey)
+
+ sha1Hash := sha1.New()
+ sha1Hash.Write([]byte(signatureBase))
+ sha1Result := sha1Hash.Sum(nil)
+
+ sha1String := hex.EncodeToString(sha1Result)
+
+ md5Hash := md5.New()
+ md5Hash.Write([]byte(sha1String))
+ md5Result := md5Hash.Sum(nil)
+
+ md5String := hex.EncodeToString(md5Result)
+
+ deviceSign := fmt.Sprintf("div101.%s%s", deviceID, md5String)
+
+ return deviceSign
+}
diff --git a/drivers/thunder_browser/driver.go b/drivers/thunder_browser/driver.go
index 96dd7e8e..0b38d077 100644
--- a/drivers/thunder_browser/driver.go
+++ b/drivers/thunder_browser/driver.go
@@ -4,10 +4,15 @@ import (
"context"
"errors"
"fmt"
+ "io"
+ "net/http"
+ "strings"
+
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
+ streamPkg "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
"github.com/aws/aws-sdk-go/aws"
@@ -15,9 +20,6 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/go-resty/resty/v2"
- "io"
- "net/http"
- "strings"
)
type ThunderBrowser struct {
@@ -456,15 +458,10 @@ func (xc *XunLeiBrowserCommon) Remove(ctx context.Context, obj model.Obj) error
}
func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
- hi := stream.GetHash()
- gcid := hi.GetHash(hash_extend.GCID)
+ gcid := stream.GetHash().GetHash(hash_extend.GCID)
+ var err error
if len(gcid) < hash_extend.GCID.Width {
- tFile, err := stream.CacheFullInTempFile()
- if err != nil {
- return err
- }
-
- gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
+ _, gcid, err = streamPkg.CacheFullInTempFileAndHash(stream, hash_extend.GCID, stream.GetSize())
if err != nil {
return err
}
@@ -481,7 +478,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream
}
var resp UploadTaskResponse
- _, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
+ _, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
r.SetContext(ctx)
r.SetBody(&js)
}, &resp)
@@ -508,7 +505,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream
Bucket: aws.String(param.Bucket),
Key: aws.String(param.Key),
Expires: aws.Time(param.Expiration),
- Body: io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up)),
+ Body: driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up))),
})
return err
}
diff --git a/drivers/thunderx/driver.go b/drivers/thunderx/driver.go
index b9ee668c..6ee8901a 100644
--- a/drivers/thunderx/driver.go
+++ b/drivers/thunderx/driver.go
@@ -3,11 +3,15 @@ package thunderx
import (
"context"
"fmt"
+ "net/http"
+ "strings"
+
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
"github.com/aws/aws-sdk-go/aws"
@@ -15,8 +19,6 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/go-resty/resty/v2"
- "net/http"
- "strings"
)
type ThunderX struct {
@@ -363,29 +365,24 @@ func (xc *XunLeiXCommon) Remove(ctx context.Context, obj model.Obj) error {
return err
}
-func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
- hi := stream.GetHash()
- gcid := hi.GetHash(hash_extend.GCID)
+func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
+ gcid := file.GetHash().GetHash(hash_extend.GCID)
+ var err error
if len(gcid) < hash_extend.GCID.Width {
- tFile, err := stream.CacheFullInTempFile()
- if err != nil {
- return err
- }
-
- gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
+ _, gcid, err = stream.CacheFullInTempFileAndHash(file, hash_extend.GCID, file.GetSize())
if err != nil {
return err
}
}
var resp UploadTaskResponse
- _, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
+ _, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
r.SetContext(ctx)
r.SetBody(&base.Json{
"kind": FILE,
"parent_id": dstDir.GetID(),
- "name": stream.GetName(),
- "size": stream.GetSize(),
+ "name": file.GetName(),
+ "size": file.GetSize(),
"hash": gcid,
"upload_type": UPLOAD_TYPE_RESUMABLE,
})
@@ -406,14 +403,17 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model
return err
}
uploader := s3manager.NewUploader(s)
- if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
- uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
+ if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
+ uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
}
_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: aws.String(param.Bucket),
Key: aws.String(param.Key),
Expires: aws.Time(param.Expiration),
- Body: stream,
+ Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+ Reader: file,
+ UpdateProgress: up,
+ }),
})
return err
}
diff --git a/drivers/trainbit/driver.go b/drivers/trainbit/driver.go
index 795b2fb8..f4f4bf3f 100644
--- a/drivers/trainbit/driver.go
+++ b/drivers/trainbit/driver.go
@@ -58,7 +58,7 @@ func (d *Trainbit) List(ctx context.Context, dir model.Obj, args model.ListArgs)
return nil, err
}
var jsonData any
- json.Unmarshal(data, &jsonData)
+ err = json.Unmarshal(data, &jsonData)
if err != nil {
return nil, err
}
@@ -114,23 +114,18 @@ func (d *Trainbit) Remove(ctx context.Context, obj model.Obj) error {
return err
}
-func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
endpoint, _ := url.Parse("https://tb28.trainbit.com/api/upload/send_raw/")
query := &url.Values{}
query.Add("q", strings.Split(dstDir.GetID(), "_")[1])
query.Add("guid", guid)
- query.Add("name", url.QueryEscape(local2provider(stream.GetName(), false)+"."))
+ query.Add("name", url.QueryEscape(local2provider(s.GetName(), false)+"."))
endpoint.RawQuery = query.Encode()
- var total int64
- total = 0
- progressReader := &ProgressReader{
- stream,
- func(byteNum int) {
- total += int64(byteNum)
- up(float64(total) / float64(stream.GetSize()) * 100)
- },
- }
- req, err := http.NewRequest(http.MethodPost, endpoint.String(), progressReader)
+ progressReader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+ Reader: s,
+ UpdateProgress: up,
+ })
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint.String(), progressReader)
if err != nil {
return err
}
diff --git a/drivers/trainbit/util.go b/drivers/trainbit/util.go
index afc111a8..486e8851 100644
--- a/drivers/trainbit/util.go
+++ b/drivers/trainbit/util.go
@@ -13,17 +13,6 @@ import (
"github.com/alist-org/alist/v3/internal/model"
)
-type ProgressReader struct {
- io.Reader
- reporter func(byteNum int)
-}
-
-func (progressReader *ProgressReader) Read(data []byte) (int, error) {
- byteNum, err := progressReader.Reader.Read(data)
- progressReader.reporter(byteNum)
- return byteNum, err
-}
-
func get(url string, apiKey string, AUSHELLPORTAL string) (*http.Response, error) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
diff --git a/drivers/url_tree/driver.go b/drivers/url_tree/driver.go
index 6a45bb7d..049bd2db 100644
--- a/drivers/url_tree/driver.go
+++ b/drivers/url_tree/driver.go
@@ -2,11 +2,15 @@ package url_tree
import (
"context"
+ "errors"
stdpath "path"
+ "strings"
+ "sync"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
log "github.com/sirupsen/logrus"
)
@@ -14,7 +18,8 @@ import (
type Urls struct {
model.Storage
Addition
- root *Node
+ root *Node
+ mutex sync.RWMutex
}
func (d *Urls) Config() driver.Config {
@@ -40,11 +45,15 @@ func (d *Urls) Drop(ctx context.Context) error {
}
func (d *Urls) Get(ctx context.Context, path string) (model.Obj, error) {
+ d.mutex.RLock()
+ defer d.mutex.RUnlock()
node := GetNodeFromRootByPath(d.root, path)
return nodeToObj(node, path)
}
func (d *Urls) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+ d.mutex.RLock()
+ defer d.mutex.RUnlock()
node := GetNodeFromRootByPath(d.root, dir.GetPath())
log.Debugf("path: %s, node: %+v", dir.GetPath(), node)
if node == nil {
@@ -59,6 +68,8 @@ func (d *Urls) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
}
func (d *Urls) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+ d.mutex.RLock()
+ defer d.mutex.RUnlock()
node := GetNodeFromRootByPath(d.root, file.GetPath())
log.Debugf("path: %s, node: %+v", file.GetPath(), node)
if node == nil {
@@ -72,6 +83,192 @@ func (d *Urls) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
return nil, errs.NotFile
}
+func (d *Urls) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
+ if !d.Writable {
+ return nil, errs.PermissionDenied
+ }
+ d.mutex.Lock()
+ defer d.mutex.Unlock()
+ node := GetNodeFromRootByPath(d.root, parentDir.GetPath())
+ if node == nil {
+ return nil, errs.ObjectNotFound
+ }
+ if node.isFile() {
+ return nil, errs.NotFolder
+ }
+ dir := &Node{
+ Name: dirName,
+ Level: node.Level + 1,
+ }
+ node.Children = append(node.Children, dir)
+ d.updateStorage()
+ return nodeToObj(dir, stdpath.Join(parentDir.GetPath(), dirName))
+}
+
+func (d *Urls) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+ if !d.Writable {
+ return nil, errs.PermissionDenied
+ }
+ if strings.HasPrefix(dstDir.GetPath(), srcObj.GetPath()) {
+ return nil, errors.New("cannot move parent dir to child")
+ }
+ d.mutex.Lock()
+ defer d.mutex.Unlock()
+ dstNode := GetNodeFromRootByPath(d.root, dstDir.GetPath())
+ if dstNode == nil || dstNode.isFile() {
+ return nil, errs.NotFolder
+ }
+ srcDir, srcName := stdpath.Split(srcObj.GetPath())
+ srcParentNode := GetNodeFromRootByPath(d.root, srcDir)
+ if srcParentNode == nil {
+ return nil, errs.ObjectNotFound
+ }
+ newChildren := make([]*Node, 0, len(srcParentNode.Children))
+ var srcNode *Node
+ for _, child := range srcParentNode.Children {
+ if child.Name == srcName {
+ srcNode = child
+ } else {
+ newChildren = append(newChildren, child)
+ }
+ }
+ if srcNode == nil {
+ return nil, errs.ObjectNotFound
+ }
+ srcParentNode.Children = newChildren
+ srcNode.setLevel(dstNode.Level + 1)
+ dstNode.Children = append(dstNode.Children, srcNode)
+ d.root.calSize()
+ d.updateStorage()
+ return nodeToObj(srcNode, stdpath.Join(dstDir.GetPath(), srcName))
+}
+
+func (d *Urls) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
+ if !d.Writable {
+ return nil, errs.PermissionDenied
+ }
+ d.mutex.Lock()
+ defer d.mutex.Unlock()
+ srcNode := GetNodeFromRootByPath(d.root, srcObj.GetPath())
+ if srcNode == nil {
+ return nil, errs.ObjectNotFound
+ }
+ srcNode.Name = newName
+ d.updateStorage()
+ return nodeToObj(srcNode, stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName))
+}
+
+func (d *Urls) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+ if !d.Writable {
+ return nil, errs.PermissionDenied
+ }
+ if strings.HasPrefix(dstDir.GetPath(), srcObj.GetPath()) {
+ return nil, errors.New("cannot copy parent dir to child")
+ }
+ d.mutex.Lock()
+ defer d.mutex.Unlock()
+ dstNode := GetNodeFromRootByPath(d.root, dstDir.GetPath())
+ if dstNode == nil || dstNode.isFile() {
+ return nil, errs.NotFolder
+ }
+ srcNode := GetNodeFromRootByPath(d.root, srcObj.GetPath())
+ if srcNode == nil {
+ return nil, errs.ObjectNotFound
+ }
+ newNode := srcNode.deepCopy(dstNode.Level + 1)
+ dstNode.Children = append(dstNode.Children, newNode)
+ d.root.calSize()
+ d.updateStorage()
+ return nodeToObj(newNode, stdpath.Join(dstDir.GetPath(), stdpath.Base(srcObj.GetPath())))
+}
+
+func (d *Urls) Remove(ctx context.Context, obj model.Obj) error {
+ if !d.Writable {
+ return errs.PermissionDenied
+ }
+ d.mutex.Lock()
+ defer d.mutex.Unlock()
+ objDir, objName := stdpath.Split(obj.GetPath())
+ nodeParent := GetNodeFromRootByPath(d.root, objDir)
+ if nodeParent == nil {
+ return errs.ObjectNotFound
+ }
+ newChildren := make([]*Node, 0, len(nodeParent.Children))
+ var deletedObj *Node
+ for _, child := range nodeParent.Children {
+ if child.Name != objName {
+ newChildren = append(newChildren, child)
+ } else {
+ deletedObj = child
+ }
+ }
+ if deletedObj == nil {
+ return errs.ObjectNotFound
+ }
+ nodeParent.Children = newChildren
+ if deletedObj.Size > 0 {
+ d.root.calSize()
+ }
+ d.updateStorage()
+ return nil
+}
+
+func (d *Urls) PutURL(ctx context.Context, dstDir model.Obj, name, url string) (model.Obj, error) {
+ if !d.Writable {
+ return nil, errs.PermissionDenied
+ }
+ d.mutex.Lock()
+ defer d.mutex.Unlock()
+ dirNode := GetNodeFromRootByPath(d.root, dstDir.GetPath())
+ if dirNode == nil || dirNode.isFile() {
+ return nil, errs.NotFolder
+ }
+ newNode := &Node{
+ Name: name,
+ Level: dirNode.Level + 1,
+ Url: url,
+ }
+ dirNode.Children = append(dirNode.Children, newNode)
+ if d.HeadSize {
+ size, err := getSizeFromUrl(url)
+ if err != nil {
+ log.Errorf("get size from url error: %s", err)
+ } else {
+ newNode.Size = size
+ d.root.calSize()
+ }
+ }
+ d.updateStorage()
+ return nodeToObj(newNode, stdpath.Join(dstDir.GetPath(), name))
+}
+
+func (d *Urls) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+ if !d.Writable {
+ return errs.PermissionDenied
+ }
+ d.mutex.Lock()
+ defer d.mutex.Unlock()
+ node := GetNodeFromRootByPath(d.root, dstDir.GetPath()) // parent
+ if node == nil {
+ return errs.ObjectNotFound
+ }
+ if node.isFile() {
+ return errs.NotFolder
+ }
+ file, err := parseFileLine(stream.GetName(), d.HeadSize)
+ if err != nil {
+ return err
+ }
+ node.Children = append(node.Children, file)
+ d.updateStorage()
+ return nil
+}
+
+func (d *Urls) updateStorage() {
+ d.UrlStructure = StringifyTree(d.root)
+ op.MustSaveDriverStorage(d)
+}
+
//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
diff --git a/drivers/url_tree/meta.go b/drivers/url_tree/meta.go
index b3ae33dc..c40414f5 100644
--- a/drivers/url_tree/meta.go
+++ b/drivers/url_tree/meta.go
@@ -12,6 +12,7 @@ type Addition struct {
// define other
UrlStructure string `json:"url_structure" type:"text" required:"true" default:"https://jsd.nn.ci/gh/alist-org/alist/README.md\nhttps://jsd.nn.ci/gh/alist-org/alist/README_cn.md\nfolder:\n CONTRIBUTING.md:1635:https://jsd.nn.ci/gh/alist-org/alist/CONTRIBUTING.md\n CODE_OF_CONDUCT.md:2093:https://jsd.nn.ci/gh/alist-org/alist/CODE_OF_CONDUCT.md" help:"structure:FolderName:\n [FileName:][FileSize:][Modified:]Url"`
HeadSize bool `json:"head_size" type:"bool" default:"false" help:"Use head method to get file size, but it may be failed."`
+ Writable bool `json:"writable" type:"bool" default:"false"`
}
var config = driver.Config{
@@ -20,7 +21,7 @@ var config = driver.Config{
OnlyLocal: false,
OnlyProxy: false,
NoCache: true,
- NoUpload: true,
+ NoUpload: false,
NeedMs: false,
DefaultRoot: "",
CheckStatus: true,
diff --git a/drivers/url_tree/types.go b/drivers/url_tree/types.go
index 7e8ca3d9..cf62d29d 100644
--- a/drivers/url_tree/types.go
+++ b/drivers/url_tree/types.go
@@ -1,5 +1,7 @@
package url_tree
+import "github.com/alist-org/alist/v3/pkg/utils"
+
// Node is a node in the folder tree
type Node struct {
Url string
@@ -44,3 +46,19 @@ func (node *Node) calSize() int64 {
node.Size = size
return size
}
+
+func (node *Node) setLevel(level int) {
+ node.Level = level
+ for _, child := range node.Children {
+ child.setLevel(level + 1)
+ }
+}
+
+func (node *Node) deepCopy(level int) *Node {
+ ret := *node
+ ret.Level = level
+ ret.Children, _ = utils.SliceConvert(ret.Children, func(child *Node) (*Node, error) {
+ return child.deepCopy(level + 1), nil
+ })
+ return &ret
+}
diff --git a/drivers/url_tree/util.go b/drivers/url_tree/util.go
index 4065218f..61a3fde2 100644
--- a/drivers/url_tree/util.go
+++ b/drivers/url_tree/util.go
@@ -153,6 +153,9 @@ func splitPath(path string) []string {
if path == "/" {
return []string{"root"}
}
+ if strings.HasSuffix(path, "/") {
+ path = path[:len(path)-1]
+ }
parts := strings.Split(path, "/")
parts[0] = "root"
return parts
@@ -190,3 +193,46 @@ func getSizeFromUrl(url string) (int64, error) {
}
return size, nil
}
+
+func StringifyTree(node *Node) string {
+ sb := strings.Builder{}
+ if node.Level == -1 {
+ for i, child := range node.Children {
+ sb.WriteString(StringifyTree(child))
+ if i < len(node.Children)-1 {
+ sb.WriteString("\n")
+ }
+ }
+ return sb.String()
+ }
+ for i := 0; i < node.Level; i++ {
+ sb.WriteString(" ")
+ }
+ if node.Url == "" {
+ sb.WriteString(node.Name)
+ sb.WriteString(":")
+ for _, child := range node.Children {
+ sb.WriteString("\n")
+ sb.WriteString(StringifyTree(child))
+ }
+ } else if node.Size == 0 && node.Modified == 0 {
+ if stdpath.Base(node.Url) == node.Name {
+ sb.WriteString(node.Url)
+ } else {
+ sb.WriteString(fmt.Sprintf("%s:%s", node.Name, node.Url))
+ }
+ } else {
+ sb.WriteString(node.Name)
+ sb.WriteString(":")
+ if node.Size != 0 || node.Modified != 0 {
+ sb.WriteString(strconv.FormatInt(node.Size, 10))
+ sb.WriteString(":")
+ }
+ if node.Modified != 0 {
+ sb.WriteString(strconv.FormatInt(node.Modified, 10))
+ sb.WriteString(":")
+ }
+ sb.WriteString(node.Url)
+ }
+ return sb.String()
+}
diff --git a/drivers/uss/driver.go b/drivers/uss/driver.go
index 447515d8..2e219050 100644
--- a/drivers/uss/driver.go
+++ b/drivers/uss/driver.go
@@ -3,6 +3,7 @@ package uss
import (
"context"
"fmt"
+ "github.com/alist-org/alist/v3/internal/stream"
"net/url"
"path"
"strings"
@@ -122,11 +123,13 @@ func (d *USS) Remove(ctx context.Context, obj model.Obj) error {
})
}
-func (d *USS) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
- // TODO not support cancel??
+func (d *USS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
return d.client.Put(&upyun.PutObjectConfig{
- Path: getKey(path.Join(dstDir.GetPath(), stream.GetName()), false),
- Reader: stream,
+ Path: getKey(path.Join(dstDir.GetPath(), s.GetName()), false),
+ Reader: driver.NewLimitedUploadStream(ctx, &stream.ReaderUpdatingProgress{
+ Reader: s,
+ UpdateProgress: up,
+ }),
})
}
diff --git a/drivers/vtencent/drive.go b/drivers/vtencent/drive.go
index 67643143..36a91672 100644
--- a/drivers/vtencent/drive.go
+++ b/drivers/vtencent/drive.go
@@ -55,7 +55,9 @@ func (d *Vtencent) Init(ctx context.Context) error {
}
func (d *Vtencent) Drop(ctx context.Context) error {
- d.cron.Stop()
+ if d.cron != nil {
+ d.cron.Stop()
+ }
return nil
}
diff --git a/drivers/vtencent/util.go b/drivers/vtencent/util.go
index ba87f1ab..4ba72d1b 100644
--- a/drivers/vtencent/util.go
+++ b/drivers/vtencent/util.go
@@ -8,9 +8,7 @@ import (
"fmt"
"io"
"net/http"
- "path"
"strconv"
- "strings"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
@@ -151,7 +149,7 @@ func (d *Vtencent) ApplyUploadUGC(signature string, stream model.FileStreamer) (
form := base.Json{
"signature": signature,
"videoName": stream.GetName(),
- "videoType": strings.ReplaceAll(path.Ext(stream.GetName()), ".", ""),
+ "videoType": utils.Ext(stream.GetName()),
"videoSize": stream.GetSize(),
}
var resps RspApplyUploadUGC
@@ -278,7 +276,8 @@ func (d *Vtencent) FileUpload(ctx context.Context, dstDir model.Obj, stream mode
input := &s3manager.UploadInput{
Bucket: aws.String(fmt.Sprintf("%s-%d", params.StorageBucket, params.StorageAppID)),
Key: ¶ms.Video.StoragePath,
- Body: io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up))),
+ Body: driver.NewLimitedUploadStream(ctx,
+ io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up)))),
}
_, err = uploader.UploadWithContext(ctx, input)
if err != nil {
diff --git a/drivers/webdav/driver.go b/drivers/webdav/driver.go
index b402b1db..45150fca 100644
--- a/drivers/webdav/driver.go
+++ b/drivers/webdav/driver.go
@@ -93,13 +93,16 @@ func (d *WebDav) Remove(ctx context.Context, obj model.Obj) error {
return d.client.RemoveAll(getPath(obj))
}
-func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
callback := func(r *http.Request) {
- r.Header.Set("Content-Type", stream.GetMimetype())
- r.ContentLength = stream.GetSize()
+ r.Header.Set("Content-Type", s.GetMimetype())
+ r.ContentLength = s.GetSize()
}
- // TODO: support cancel
- err := d.client.WriteStream(path.Join(dstDir.GetPath(), stream.GetName()), stream, 0644, callback)
+ reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+ Reader: s,
+ UpdateProgress: up,
+ })
+ err := d.client.WriteStream(path.Join(dstDir.GetPath(), s.GetName()), reader, 0644, callback)
return err
}
diff --git a/drivers/weiyun/driver.go b/drivers/weiyun/driver.go
index e6d5897c..90793d33 100644
--- a/drivers/weiyun/driver.go
+++ b/drivers/weiyun/driver.go
@@ -7,6 +7,7 @@ import (
"math"
"net/http"
"strconv"
+ "sync/atomic"
"time"
"github.com/alist-org/alist/v3/drivers/base"
@@ -69,7 +70,7 @@ func (d *WeiYun) Init(ctx context.Context) error {
if d.client.LoginType() == 1 {
d.cron = cron.NewCron(time.Minute * 5)
d.cron.Do(func() {
- d.client.KeepAlive()
+ _ = d.client.KeepAlive()
})
}
@@ -311,77 +312,83 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// NOTE:
// 秒传需要sha1最后一个状态,但sha1无法逆运算需要读完整个文件(或许可以??)
// 服务器支持上传进度恢复,不需要额外实现
- if folder, ok := dstDir.(*Folder); ok {
- file, err := stream.CacheFullInTempFile()
- if err != nil {
- return nil, err
- }
+ var folder *Folder
+ var ok bool
+ if folder, ok = dstDir.(*Folder); !ok {
+ return nil, errs.NotSupport
+ }
+ file, err := stream.CacheFullInTempFile()
+ if err != nil {
+ return nil, err
+ }
- // step 1.
- preData, err := d.client.PreUpload(ctx, weiyunsdkgo.UpdloadFileParam{
- PdirKey: folder.GetPKey(),
- DirKey: folder.DirKey,
+ // step 1.
+ preData, err := d.client.PreUpload(ctx, weiyunsdkgo.UpdloadFileParam{
+ PdirKey: folder.GetPKey(),
+ DirKey: folder.DirKey,
- FileName: stream.GetName(),
- FileSize: stream.GetSize(),
- File: file,
+ FileName: stream.GetName(),
+ FileSize: stream.GetSize(),
+ File: file,
- ChannelCount: 4,
- FileExistOption: 1,
- })
- if err != nil {
- return nil, err
- }
+ ChannelCount: 4,
+ FileExistOption: 1,
+ })
+ if err != nil {
+ return nil, err
+ }
- // not fast upload
- if !preData.FileExist {
- // step.2 增加上传通道
- if len(preData.ChannelList) < d.uploadThread {
- newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData)
- if err != nil {
- return nil, err
- }
- preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...)
- }
- // step.3 上传
- threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList),
- retry.Attempts(3),
- retry.Delay(time.Second),
- retry.DelayType(retry.BackOffDelay))
-
- for _, channel := range preData.ChannelList {
- if utils.IsCanceled(upCtx) {
- break
- }
-
- var channel = channel
- threadG.Go(func(ctx context.Context) error {
- for {
- channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len)))
- upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData,
- io.NewSectionReader(file, channel.Offset, int64(channel.Len)))
- if err != nil {
- return err
- }
- // 上传完成
- if upData.UploadState != 1 {
- return nil
- }
- channel = upData.Channel
- }
- })
- }
- if err = threadG.Wait(); err != nil {
+ // not fast upload
+ if !preData.FileExist {
+ // step.2 增加上传通道
+ if len(preData.ChannelList) < d.uploadThread {
+ newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData)
+ if err != nil {
return nil, err
}
+ preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...)
}
+ // step.3 上传
+ threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList),
+ retry.Attempts(3),
+ retry.Delay(time.Second),
+ retry.DelayType(retry.BackOffDelay))
- return &File{
- PFolder: folder,
- File: preData.File,
- }, nil
+ total := atomic.Int64{}
+ for _, channel := range preData.ChannelList {
+ if utils.IsCanceled(upCtx) {
+ break
+ }
+
+ var channel = channel
+ threadG.Go(func(ctx context.Context) error {
+ for {
+ channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len)))
+ len64 := int64(channel.Len)
+ upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData,
+ driver.NewLimitedUploadStream(ctx, io.NewSectionReader(file, channel.Offset, len64)))
+ if err != nil {
+ return err
+ }
+ cur := total.Add(len64)
+ up(float64(cur) * 100.0 / float64(stream.GetSize()))
+ // 上传完成
+ if upData.UploadState != 1 {
+ return nil
+ }
+ channel = upData.Channel
+ }
+ })
+ }
+ if err = threadG.Wait(); err != nil {
+ return nil, err
+ }
}
- return nil, errs.NotSupport
+
+ return &File{
+ PFolder: folder,
+ File: preData.File,
+ }, nil
}
// func (d *WeiYun) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
diff --git a/drivers/wopan/driver.go b/drivers/wopan/driver.go
index bccce4b1..82ec05a9 100644
--- a/drivers/wopan/driver.go
+++ b/drivers/wopan/driver.go
@@ -155,12 +155,13 @@ func (d *Wopan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
_, err := d.client.Upload2C(d.getSpaceType(), wopan.Upload2CFile{
Name: stream.GetName(),
Size: stream.GetSize(),
- Content: stream,
+ Content: driver.NewLimitedUploadStream(ctx, stream),
ContentType: stream.GetMimetype(),
}, dstDir.GetID(), d.FamilyID, wopan.Upload2COption{
OnProgress: func(current, total int64) {
up(100 * float64(current) / float64(total))
},
+ Ctx: ctx,
})
return err
}
diff --git a/drivers/yandex_disk/driver.go b/drivers/yandex_disk/driver.go
index 5af9f2e4..6e5ca05c 100644
--- a/drivers/yandex_disk/driver.go
+++ b/drivers/yandex_disk/driver.go
@@ -106,25 +106,31 @@ func (d *YandexDisk) Remove(ctx context.Context, obj model.Obj) error {
return err
}
-func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
var resp UploadResp
_, err := d.request("/upload", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(map[string]string{
- "path": path.Join(dstDir.GetPath(), stream.GetName()),
+ "path": path.Join(dstDir.GetPath(), s.GetName()),
"overwrite": "true",
})
}, &resp)
if err != nil {
return err
}
- req, err := http.NewRequest(resp.Method, resp.Href, stream)
+ reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+ Reader: s,
+ UpdateProgress: up,
+ })
+ req, err := http.NewRequestWithContext(ctx, resp.Method, resp.Href, reader)
if err != nil {
return err
}
- req = req.WithContext(ctx)
- req.Header.Set("Content-Length", strconv.FormatInt(stream.GetSize(), 10))
+ req.Header.Set("Content-Length", strconv.FormatInt(s.GetSize(), 10))
req.Header.Set("Content-Type", "application/octet-stream")
res, err := base.HttpClient.Do(req)
+ if err != nil {
+ return err
+ }
_ = res.Body.Close()
return err
}
diff --git a/entrypoint.sh b/entrypoint.sh
index a0d80835..c24ed6ee 100644
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -1,11 +1,19 @@
-#!/bin/bash
-
-chown -R ${PUID}:${PGID} /opt/alist/
+#!/bin/sh
umask ${UMASK}
if [ "$1" = "version" ]; then
./alist version
else
+ if [ "$RUN_ARIA2" = "true" ]; then
+ chown -R ${PUID}:${PGID} /opt/aria2/
+ exec su-exec ${PUID}:${PGID} nohup aria2c \
+ --enable-rpc \
+ --rpc-allow-origin-all \
+ --conf-path=/opt/aria2/.aria2/aria2.conf \
+ >/dev/null 2>&1 &
+ fi
+
+ chown -R ${PUID}:${PGID} /opt/alist/
exec su-exec ${PUID}:${PGID} ./alist server --no-prefix
fi
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 8ec1c302..e8afe0e7 100644
--- a/go.mod
+++ b/go.mod
@@ -1,9 +1,12 @@
module github.com/alist-org/alist/v3
-go 1.22.4
+go 1.23.4
require (
- github.com/SheltonZhu/115driver v1.0.27
+ github.com/KirCute/ftpserverlib-pasvportmap v1.25.0
+ github.com/KirCute/sftpd-alist v0.0.12
+ github.com/ProtonMail/go-crypto v1.0.0
+ github.com/SheltonZhu/115driver v1.0.34
github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21
github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4
github.com/alist-org/gofakes3 v0.0.7
@@ -25,7 +28,6 @@ require (
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564
github.com/foxxorcat/mopan-sdk-go v0.1.6
github.com/foxxorcat/weiyun-sdk-go v0.1.3
- github.com/gaoyb7/115drive-webdav v0.1.8
github.com/gin-contrib/cors v1.7.2
github.com/gin-gonic/gin v1.10.0
github.com/go-resty/resty/v2 v2.14.0
@@ -33,37 +35,42 @@ require (
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3
+ github.com/hekmon/transmissionrpc/v3 v3.0.0
github.com/hirochachacha/go-smb2 v1.1.0
github.com/ipfs/go-ipfs-api v0.7.0
github.com/jlaffaye/ftp v0.2.0
github.com/json-iterator/go v1.1.12
+ github.com/kdomanski/iso9660 v0.4.0
github.com/larksuite/oapi-sdk-go/v3 v3.3.1
github.com/maruel/natural v1.1.1
github.com/meilisearch/meilisearch-go v0.27.2
+ github.com/mholt/archives v0.1.0
github.com/minio/sio v0.4.0
github.com/natefinch/lumberjack v2.0.0+incompatible
github.com/ncw/swift/v2 v2.0.3
- github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.13.6
github.com/pquerna/otp v1.4.0
github.com/rclone/rclone v1.67.0
+ github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
github.com/sirupsen/logrus v1.9.3
+ github.com/spf13/afero v1.11.0
github.com/spf13/cobra v1.8.1
- github.com/stretchr/testify v1.9.0
+ github.com/stretchr/testify v1.10.0
github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7
github.com/u2takey/ffmpeg-go v0.5.0
github.com/upyun/go-sdk/v3 v3.0.4
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5
- github.com/xhofe/tache v0.1.2
+ github.com/xhofe/tache v0.1.5
github.com/xhofe/wopan-sdk-go v0.1.3
+ github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22
- golang.org/x/crypto v0.27.0
+ golang.org/x/crypto v0.36.0
golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e
golang.org/x/image v0.19.0
- golang.org/x/net v0.28.0
+ golang.org/x/net v0.38.0
golang.org/x/oauth2 v0.22.0
- golang.org/x/time v0.6.0
+ golang.org/x/time v0.8.0
google.golang.org/appengine v1.6.8
gopkg.in/ldap.v3 v3.1.0
gorm.io/driver/mysql v1.5.7
@@ -73,17 +80,47 @@ require (
)
require (
- github.com/BurntSushi/toml v0.3.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 // indirect
+)
+
+require (
+ github.com/STARRY-S/zip v0.2.1 // indirect
+ github.com/aymerick/douceur v0.2.0 // indirect
github.com/blevesearch/go-faiss v1.0.20 // indirect
github.com/blevesearch/zapx/v16 v16.1.5 // indirect
+ github.com/bodgit/plumbing v1.3.0 // indirect
+ github.com/bodgit/sevenzip v1.6.0
+ github.com/bodgit/windows v1.0.1 // indirect
github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/charmbracelet/x/ansi v0.2.3 // indirect
github.com/charmbracelet/x/term v0.2.0 // indirect
+ github.com/cloudflare/circl v1.3.7 // indirect
github.com/cloudwego/base64x v0.1.4 // indirect
github.com/cloudwego/iasm v0.2.0 // indirect
+ github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
+ github.com/fclairamb/go-log v0.5.0 // indirect
+ github.com/gorilla/css v1.0.1 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+ github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+ github.com/hekmon/cunits/v2 v2.1.0 // indirect
github.com/ipfs/boxo v0.12.0 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
+ github.com/klauspost/pgzip v1.2.6 // indirect
+ github.com/kr/text v0.2.0 // indirect
+ github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
+ github.com/microcosm-cc/bluemonday v1.0.27
+ github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78
+ github.com/sorairolake/lzip-go v0.3.5 // indirect
+ github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
+ github.com/therootcompany/xz v1.0.1 // indirect
+ github.com/ulikunitz/xz v0.5.12 // indirect
+ github.com/xhofe/115-sdk-go v0.1.5
+ github.com/yuin/goldmark v1.7.8
+ go4.org v0.0.0-20230225012048-214862532bf5
+ resty.dev/v3 v3.0.0-beta.2 // indirect
)
require (
@@ -91,8 +128,8 @@ require (
github.com/RoaringBitmap/roaring v1.9.3 // indirect
github.com/abbot/go-http-auth v0.4.0 // indirect
github.com/aead/ecdh v0.2.0 // indirect
- github.com/andreburgaud/crypt2go v1.2.0 // indirect
- github.com/andybalholm/brotli v1.0.4 // indirect
+ github.com/andreburgaud/crypt2go v1.8.0 // indirect
+ github.com/andybalholm/brotli v1.1.1 // indirect
github.com/axgle/mahonia v0.0.0-20180208002826-3358181d7394
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
@@ -114,7 +151,6 @@ require (
github.com/blevesearch/zapx/v13 v13.3.10 // indirect
github.com/blevesearch/zapx/v14 v14.3.10 // indirect
github.com/blevesearch/zapx/v15 v15.3.13 // indirect
- github.com/bluele/gcache v0.0.2 // indirect
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
github.com/bytedance/sonic v1.11.6 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -147,13 +183,12 @@ require (
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.5.5 // indirect
- github.com/jaevor/go-nanoid v1.3.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
- github.com/klauspost/compress v1.17.8 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
@@ -186,8 +221,9 @@ require (
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
+ github.com/otiai10/copy v1.14.0
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
- github.com/pierrec/lz4/v4 v4.1.18 // indirect
+ github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
@@ -216,10 +252,10 @@ require (
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/bbolt v1.3.8 // indirect
golang.org/x/arch v0.8.0 // indirect
- golang.org/x/sync v0.8.0 // indirect
- golang.org/x/sys v0.25.0 // indirect
- golang.org/x/term v0.24.0 // indirect
- golang.org/x/text v0.18.0 // indirect
+ golang.org/x/sync v0.12.0
+ golang.org/x/sys v0.31.0 // indirect
+ golang.org/x/term v0.30.0 // indirect
+ golang.org/x/text v0.23.0
golang.org/x/tools v0.24.0 // indirect
google.golang.org/api v0.169.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
@@ -231,3 +267,5 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.1.7 // indirect
)
+
+// replace github.com/xhofe/115-sdk-go => ../../xhofe/115-sdk-go
diff --git a/go.sum b/go.sum
index 6ba075f3..6fbaeb2b 100644
--- a/go.sum
+++ b/go.sum
@@ -1,14 +1,47 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw=
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 h1:ikwCzeqoqN6wvBHOB9OI6dde/jbV7EoTMpUcxtYl5Po=
+github.com/KirCute/ftpserverlib-pasvportmap v1.25.0/go.mod h1:v0NgMtKDDi/6CM6r4P+daCljCW3eO9yS+Z+pZDTKo1E=
+github.com/KirCute/sftpd-alist v0.0.12 h1:GNVM5QLbQLAfXP4wGUlXFA2IO6fVek0n0IsGnOuISdg=
+github.com/KirCute/sftpd-alist v0.0.12/go.mod h1:2wNK7yyW2XfjyJq10OY6xB4COLac64hOwfV6clDJn6s=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
+github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
+github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM=
github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
-github.com/SheltonZhu/115driver v1.0.27 h1:Ya1HYHYXFmi7JnqQ/+Vy6xZvq3leto+E+PxTm6UChj8=
-github.com/SheltonZhu/115driver v1.0.27/go.mod h1:e3fPOBANbH/FsTya8FquJwOR3ErhCQgEab3q6CVY2k4=
+github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
+github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4=
+github.com/SheltonZhu/115driver v1.0.34 h1:zhMLp4vgq7GksqvSxQQDOVfK6EOHldQl4b2n8tnZ+EE=
+github.com/SheltonZhu/115driver v1.0.34/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E=
github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A=
github.com/Unknwon/goconfig v1.0.0/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 h1:h6q5E9aMBhhdqouW81LozVPI1I+Pu6IxL2EKpfm5OjY=
@@ -25,10 +58,11 @@ github.com/alist-org/times v0.0.0-20240721124654-efa0c7d3ad92 h1:pIEI87zhv8ZzQcu
github.com/alist-org/times v0.0.0-20240721124654-efa0c7d3ad92/go.mod h1:oPJwGY3sLmGgcJamGumz//0A35f4BwQRacyqLNcJTOU=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
-github.com/andreburgaud/crypt2go v1.2.0 h1:oly/ENAodeqTYpUafgd4r3v+VKLQnmOKUyfpj+TxHbE=
-github.com/andreburgaud/crypt2go v1.2.0/go.mod h1:kKRqlrX/3Q9Ki7HdUsoh0cX1Urq14/Hcta4l4VrIXrI=
-github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
+github.com/andreburgaud/crypt2go v1.8.0 h1:J73vGTb1P6XL69SSuumbKs0DWn3ulbl9L92ZXBjw6pc=
+github.com/andreburgaud/crypt2go v1.8.0/go.mod h1:L5nfShQ91W78hOWhUH2tlGRPO+POAPJAF5fKOLB9SXg=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
+github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
@@ -40,6 +74,8 @@ github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiE
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
+github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
+github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -84,20 +120,24 @@ github.com/blevesearch/zapx/v15 v15.3.13 h1:6EkfaZiPlAxqXz0neniq35my6S48QI94W/wy
github.com/blevesearch/zapx/v15 v15.3.13/go.mod h1:Turk/TNRKj9es7ZpKK95PS7f6D44Y7fAFy8F4LXQtGg=
github.com/blevesearch/zapx/v16 v16.1.5 h1:b0sMcarqNFxuXvjoXsF8WtwVahnxyhEvBSRJi/AUHjU=
github.com/blevesearch/zapx/v16 v16.1.5/go.mod h1:J4mSF39w1QELc11EWRSBFkPeZuO7r/NPKkHzDCoiaI8=
-github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
-github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
+github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU=
+github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs=
+github.com/bodgit/sevenzip v1.6.0 h1:a4R0Wu6/P1o1pP/3VV++aEOcyeBxeO/xE2Y9NSTrr6A=
+github.com/bodgit/sevenzip v1.6.0/go.mod h1:zOBh9nJUof7tcrlqJFv1koWRrhz3LbDbUNngkuZxLMc=
+github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
+github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc=
github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/charmbracelet/bubbles v0.19.0 h1:gKZkKXPP6GlDk6EcfujDK19PCQqRjaJZQ7QRERx1UF0=
-github.com/charmbracelet/bubbles v0.19.0/go.mod h1:WILteEqZ+krG5c3ntGEMeG99nCupcuIk7V0/zOP0tOA=
github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE=
github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU=
github.com/charmbracelet/bubbletea v1.1.0 h1:FjAl9eAL3HBCHenhz/ZPjkKdScmaS5SK69JAK2YJK9c=
@@ -112,8 +152,15 @@ github.com/charmbracelet/x/term v0.2.0 h1:cNB9Ot9q8I711MyZ7myUR5HFWL/lc3OpU8jZ4h
github.com/charmbracelet/x/term v0.2.0/go.mod h1:GVxgxAbjUrmpvIINHIQnJJKpMlHiZ4cktEQCN6GWyF0=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e h1:GLC8iDDcbt1H8+RkNao2nRGjyNTIo81e1rAJT9/uWYA=
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.mod h1:ln9Whp+wVY/FTbn2SK0ag+SKD2fC0yQCF/Lqowc1LmU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
@@ -142,10 +189,17 @@ github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo=
github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4=
+github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
+github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJLNCEi7YHVMkwwtfSr2k9splgdSM=
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
+github.com/fclairamb/go-log v0.5.0 h1:Gz9wSamEaA6lta4IU2cjJc2xSq5sV5VYSB5w/SUHhVc=
+github.com/fclairamb/go-log v0.5.0/go.mod h1:XoRO1dYezpsGmLLkZE9I+sHqpqY65p8JA+Vqblb7k40=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/foxxorcat/mopan-sdk-go v0.1.6 h1:6J37oI4wMZLj8EPgSCcSTTIbnI5D6RCNW/srX8vQd1Y=
@@ -157,19 +211,22 @@ github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
-github.com/gaoyb7/115drive-webdav v0.1.8 h1:EJt4PSmcbvBY4KUh2zSo5p6fN9LZFNkIzuKejipubVw=
-github.com/gaoyb7/115drive-webdav v0.1.8/go.mod h1:BKbeY6j8SKs3+rzBFFALznGxbPmefEm3vA+dGhqgOGU=
github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w=
github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
github.com/gin-contrib/cors v1.7.2 h1:oLDHxdg8W/XDoN/8zamqk/Drgt4oVZDvaV0YmvVICQw=
github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
-github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s=
github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -178,20 +235,14 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
-github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
-github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
-github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-resty/resty/v2 v2.14.0 h1:/rhkzsAqGQkozwfKS5aFAbb6TyKd3zyFRWcdRXLPCAU=
github.com/go-resty/resty/v2 v2.14.0/go.mod h1:IW6mekUOsElt9C7oWr0XRt9BNSD6D5rr9mhk6NjmNHg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
@@ -200,7 +251,6 @@ github.com/go-webauthn/webauthn v0.11.1 h1:5G/+dg91/VcaJHTtJUfwIlNJkLwbJCcnUc4W8
github.com/go-webauthn/webauthn v0.11.1/go.mod h1:YXRm1WG0OtUyDFaVAgB5KG7kVqW+6dYCJ7FTQH4SxEE=
github.com/go-webauthn/x v0.1.12 h1:RjQ5cvApzyU/xLCiP+rub0PE4HBZsLggbxGR5ZpUf/A=
github.com/go-webauthn/x v0.1.12/go.mod h1:XlRcGkNH8PT45TfeJYc6gqpOtiOendHhVmnOxh+5yHs=
-github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -211,14 +261,32 @@ github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17w
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -227,6 +295,11 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-tpm v0.9.1 h1:0pGc4X//bAlmZzMKf8iz6IsDo1nYTbYJ6FZN/rg4zdM=
github.com/google/go-tpm v0.9.1/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -234,21 +307,36 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA=
github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
+github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
+github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI0=
+github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M=
+github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ=
+github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg=
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/ipfs/boxo v0.12.0 h1:AXHg/1ONZdRQHQLgG5JHsSC3XoE4DjCAMgK+asZvUcQ=
@@ -265,8 +353,6 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
-github.com/jaevor/go-nanoid v1.3.0 h1:nD+iepesZS6pr3uOVf20vR9GdGgJW1HPaR46gtrxzkg=
-github.com/jaevor/go-nanoid v1.3.0/go.mod h1:SI+jFaPuddYkqkVQoNGHs81navCtH388TcrH0RqFKgY=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
@@ -282,24 +368,30 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 h1:G+9t9cEtnC9jFiTxyptEKuNIAbiN5ZCQzX2a74lj3xg=
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c=
+github.com/kdomanski/iso9660 v0.4.0 h1:BPKKdcINz3m0MdjIMwS0wx1nofsOjxOq8TOr45WGHFg=
+github.com/kdomanski/iso9660 v0.4.0/go.mod h1:OxUSupHsO9ceI8lBLPJKWBTphLemjrCQY8LPXM7qSzU=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
+github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -308,7 +400,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/larksuite/oapi-sdk-go/v3 v3.3.1 h1:DLQQEgHUAGZB6RVlceB1f6A94O206exxW2RIMH+gMUc=
github.com/larksuite/oapi-sdk-go/v3 v3.3.1/go.mod h1:ZEplY+kwuIrj/nqw5uSCINNATcH3KdxSN7y+UxYY5fI=
-github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
@@ -326,9 +417,10 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
+github.com/matoous/go-nanoid/v2 v2.1.0 h1:P64+dmq21hhWdtvZfEAofnvJULaRR1Yib0+PnU669bE=
+github.com/matoous/go-nanoid/v2 v2.1.0/go.mod h1:KlbGNQ+FhrUNIHUxZdL63t7tl4LaPkZNpUULS8H4uVM=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@@ -340,6 +432,10 @@ github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/meilisearch/meilisearch-go v0.27.2 h1:3G21dJ5i208shnLPDsIEZ0L0Geg/5oeXABFV7nlK94k=
github.com/meilisearch/meilisearch-go v0.27.2/go.mod h1:SxuSqDcPBIykjWz1PX+KzsYzArNLSCadQodWs8extS0=
+github.com/mholt/archives v0.1.0 h1:FacgJyrjiuyomTuNA92X5GyRBRZjE43Y/lrzKIlF35Q=
+github.com/mholt/archives v0.1.0/go.mod h1:j/Ire/jm42GN7h90F5kzj6hf6ZFzEH66de+hmjEKu+I=
+github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
+github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/minio/sio v0.4.0 h1:u4SWVEm5lXSqU42ZWawV0D9I5AZ5YMmo2RXpEQ/kRhc=
@@ -385,15 +481,17 @@ github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4
github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg=
github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
-github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831 h1:K3T3eu4h5aYIOzUtLjN08L4Qt4WGaJONMgcaD0ayBJQ=
-github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831/go.mod h1:lSHD4lC4zlMl+zcoysdJcd5KFzsWwOD8BJbyg1Ws9Ng=
+github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY=
+github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
+github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
+github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
+github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
+github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A=
-github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
-github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
-github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -413,6 +511,7 @@ github.com/pquerna/otp v1.4.0 h1:wZvl1TIVxKRThZIBiwOOHOGP/1+nZyWBil9Y2XNEDzg=
github.com/pquerna/otp v1.4.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
@@ -426,13 +525,17 @@ github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Ny
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
+github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d h1:hrujxIzL1woJ7AwssoOcM/tq5JjjG2yYOc8odClEiXA=
+github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
+github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
+github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM=
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc=
github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU=
@@ -441,16 +544,19 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
+github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu0Cwg=
+github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -459,22 +565,25 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 h1:Jtcrb09q0AVWe3BGe8qtuuGxNSHWGkTWr43kHTJ+CpA=
github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA=
+github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 h1:6Y51mutOvRGRx6KqyMNo//xk8B8o6zW9/RVmy1VamOs=
+github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543/go.mod h1:jpwqYA8KUVEvSUJHkCXsnBRJCSKP1BMa81QZ6kvRpow=
+github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw=
+github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
@@ -487,38 +596,49 @@ github.com/u2takey/ffmpeg-go v0.5.0 h1:r7d86XuL7uLWJ5mzSeQ03uvjfIhiJYvsRAJFCW4uk
github.com/u2takey/ffmpeg-go v0.5.0/go.mod h1:ruZWkvC1FEiUNjmROowOAps3ZcWxEiOpFoHCvk97kGc=
github.com/u2takey/go-utils v0.3.1 h1:TaQTgmEZZeDHQFYfd+AdUT1cT4QJgJn/XVPELhHw4ys=
github.com/u2takey/go-utils v0.3.1/go.mod h1:6e+v5vEZ/6gu12w/DC2ixZdZtCrNokVxD0JUklcqdCs=
-github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
-github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
+github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/upyun/go-sdk/v3 v3.0.4 h1:2DCJa/Yi7/3ZybT9UCPATSzvU3wpPPxhXinNlb1Hi8Q=
github.com/upyun/go-sdk/v3 v3.0.4/go.mod h1:P/SnuuwhrIgAVRd/ZpzDWqCsBAf/oHg7UggbAxyZa0E=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.37.1-0.20220607072126-8a320890c08d h1:xS9QTPgKl9ewGsAOPc+xW7DeStJDqYPfisDmeSCcbco=
github.com/valyala/fasthttp v1.37.1-0.20220607072126-8a320890c08d/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I=
-github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
-github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXorZG0KzTxbp0Cr1n3FEegfmyd9br1k=
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xhofe/115-sdk-go v0.1.5 h1:2+E92l6AX0+ABAkrdmDa9PE5ONN7wVLCaKkK80zETOg=
+github.com/xhofe/115-sdk-go v0.1.5/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U=
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI=
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0=
-github.com/xhofe/tache v0.1.2 h1:pHrXlrWcbTb4G7hVUDW7Rc+YTUnLJvnLBrdktVE1Fqg=
-github.com/xhofe/tache v0.1.2/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ=
+github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM=
+github.com/xhofe/tache v0.1.5/go.mod h1:PYt6I/XUKliSg1uHlgsk6ha+le/f6PAvjUtFZAVl3a8=
github.com/xhofe/wopan-sdk-go v0.1.3 h1:J58X6v+n25ewBZjb05pKOr7AWGohb+Rdll4CThGh6+A=
github.com/xhofe/wopan-sdk-go v0.1.3/go.mod h1:dcY9yA28fnaoZPnXZiVTFSkcd7GnIPTpTIIlfSI5z5Q=
+github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
+github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
+github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9 h1:K8gF0eekWPEX+57l30ixxzGhHH/qscI3JCnuhbN6V4M=
+github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9/go.mod h1:9BnoKCcgJ/+SLhfAXj15352hTOuVmG5Gzo8xNRINfqI=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic=
+github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22 h1:X+lHsNTlbatQ1cErXIbtyrh+3MTWxqQFS+sBP/wpFXo=
github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22/go.mod h1:1zGRDJd8zlG6P8azG96+uywfh6udYWwhOmUivw+xsuM=
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
@@ -529,34 +649,59 @@ go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGX
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
+go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
gocv.io/x/gocv v0.25.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
-golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
-golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
-golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk=
golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.19.0 h1:D9FX4QWkLfkeqaC62SonffIIuYdOk/UE2XKUBgRIBIQ=
golang.org/x/image v0.19.0/go.mod h1:y0zrRqlQRWQ5PXaYCOMLTW2fpsxZ8Qh9I/ohnInJEys=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
@@ -564,27 +709,51 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
-golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
-golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
+golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -593,30 +762,38 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -626,25 +803,25 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
-golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
-golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
-golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
-golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
-golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
-golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -652,23 +829,45 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
-golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
-golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
@@ -681,17 +880,49 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY=
google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c=
google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
@@ -713,7 +944,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo=
@@ -725,8 +955,18 @@ gorm.io/driver/sqlite v1.5.6/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDa
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.11 h1:/Wfyg1B/je1hnDx3sMkX+gAlxrlZpn6X0BXRlwXlvHg=
gorm.io/gorm v1.25.11/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
+resty.dev/v3 v3.0.0-beta.2 h1:xu4mGAdbCLuc3kbk7eddWfWm4JfhwDtdapwss5nCjnQ=
+resty.dev/v3 v3.0.0-beta.2/go.mod h1:OgkqiPvTDtOuV4MGZuUDhwOpkY8enjOsjjMzeOHefy4=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/internal/archive/all.go b/internal/archive/all.go
new file mode 100644
index 00000000..63206cb8
--- /dev/null
+++ b/internal/archive/all.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+ _ "github.com/alist-org/alist/v3/internal/archive/archives"
+ _ "github.com/alist-org/alist/v3/internal/archive/iso9660"
+ _ "github.com/alist-org/alist/v3/internal/archive/rardecode"
+ _ "github.com/alist-org/alist/v3/internal/archive/sevenzip"
+ _ "github.com/alist-org/alist/v3/internal/archive/zip"
+)
diff --git a/internal/archive/archives/archives.go b/internal/archive/archives/archives.go
new file mode 100644
index 00000000..0a42cd0c
--- /dev/null
+++ b/internal/archive/archives/archives.go
@@ -0,0 +1,141 @@
+package archives
+
+import (
+ "io"
+ "io/fs"
+ "os"
+ stdpath "path"
+ "strings"
+
+ "github.com/alist-org/alist/v3/internal/archive/tool"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/alist-org/alist/v3/pkg/utils"
+)
+
+type Archives struct {
+}
+
+func (Archives) AcceptedExtensions() []string {
+ return []string{
+ ".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar",
+ }
+}
+
+func (Archives) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
+ return map[string]tool.MultipartExtension{}
+}
+
+func (Archives) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+ fsys, err := getFs(ss[0], args)
+ if err != nil {
+ return nil, err
+ }
+ files, err := fsys.ReadDir(".")
+ if err != nil {
+ return nil, filterPassword(err)
+ }
+
+ tree := make([]model.ObjTree, 0, len(files))
+ for _, file := range files {
+ info, err := file.Info()
+ if err != nil {
+ continue
+ }
+ tree = append(tree, &model.ObjectTree{Object: *toModelObj(info)})
+ }
+ return &model.ArchiveMetaInfo{
+ Comment: "",
+ Encrypted: false,
+ Tree: tree,
+ }, nil
+}
+
+func (Archives) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+ fsys, err := getFs(ss[0], args.ArchiveArgs)
+ if err != nil {
+ return nil, err
+ }
+ innerPath := strings.TrimPrefix(args.InnerPath, "/")
+ if innerPath == "" {
+ innerPath = "."
+ }
+ obj, err := fsys.ReadDir(innerPath)
+ if err != nil {
+ return nil, filterPassword(err)
+ }
+ return utils.SliceConvert(obj, func(src os.DirEntry) (model.Obj, error) {
+ info, err := src.Info()
+ if err != nil {
+ return nil, err
+ }
+ return toModelObj(info), nil
+ })
+}
+
+func (Archives) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+ fsys, err := getFs(ss[0], args.ArchiveArgs)
+ if err != nil {
+ return nil, 0, err
+ }
+ file, err := fsys.Open(strings.TrimPrefix(args.InnerPath, "/"))
+ if err != nil {
+ return nil, 0, filterPassword(err)
+ }
+ stat, err := file.Stat()
+ if err != nil {
+ return nil, 0, filterPassword(err)
+ }
+ return file, stat.Size(), nil
+}
+
+func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+ fsys, err := getFs(ss[0], args.ArchiveArgs)
+ if err != nil {
+ return err
+ }
+ isDir := false
+ path := strings.TrimPrefix(args.InnerPath, "/")
+ if path == "" {
+ isDir = true
+ path = "."
+ } else {
+ stat, err := fsys.Stat(path)
+ if err != nil {
+ return filterPassword(err)
+ }
+ if stat.IsDir() {
+ isDir = true
+ outputPath = stdpath.Join(outputPath, stat.Name())
+ err = os.Mkdir(outputPath, 0700)
+ if err != nil {
+ return filterPassword(err)
+ }
+ }
+ }
+ if isDir {
+ err = fs.WalkDir(fsys, path, func(p string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ relPath := strings.TrimPrefix(p, path+"/")
+ dstPath := stdpath.Join(outputPath, relPath)
+ if d.IsDir() {
+ err = os.MkdirAll(dstPath, 0700)
+ } else {
+ dir := stdpath.Dir(dstPath)
+ err = decompress(fsys, p, dir, func(_ float64) {})
+ }
+ return err
+ })
+ } else {
+ err = decompress(fsys, path, outputPath, up)
+ }
+ return filterPassword(err)
+}
+
+var _ tool.Tool = (*Archives)(nil)
+
+func init() {
+ tool.RegisterTool(Archives{})
+}
diff --git a/internal/archive/archives/utils.go b/internal/archive/archives/utils.go
new file mode 100644
index 00000000..2f499a10
--- /dev/null
+++ b/internal/archive/archives/utils.go
@@ -0,0 +1,85 @@
+package archives
+
+import (
+ "io"
+ fs2 "io/fs"
+ "os"
+ stdpath "path"
+ "strings"
+
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/alist-org/alist/v3/pkg/utils"
+ "github.com/mholt/archives"
+)
+
+func getFs(ss *stream.SeekableStream, args model.ArchiveArgs) (*archives.ArchiveFS, error) {
+ reader, err := stream.NewReadAtSeeker(ss, 0)
+ if err != nil {
+ return nil, err
+ }
+ if r, ok := reader.(*stream.RangeReadReadAtSeeker); ok {
+ r.InitHeadCache()
+ }
+ format, _, err := archives.Identify(ss.Ctx, ss.GetName(), reader)
+ if err != nil {
+ return nil, errs.UnknownArchiveFormat
+ }
+ extractor, ok := format.(archives.Extractor)
+ if !ok {
+ return nil, errs.UnknownArchiveFormat
+ }
+ switch f := format.(type) {
+ case archives.SevenZip:
+ f.Password = args.Password
+ case archives.Rar:
+ f.Password = args.Password
+ }
+ return &archives.ArchiveFS{
+ Stream: io.NewSectionReader(reader, 0, ss.GetSize()),
+ Format: extractor,
+ Context: ss.Ctx,
+ }, nil
+}
+
+func toModelObj(file os.FileInfo) *model.Object {
+ return &model.Object{
+ Name: file.Name(),
+ Size: file.Size(),
+ Modified: file.ModTime(),
+ IsFolder: file.IsDir(),
+ }
+}
+
+func filterPassword(err error) error {
+ if err != nil && strings.Contains(err.Error(), "password") {
+ return errs.WrongArchivePassword
+ }
+ return err
+}
+
+func decompress(fsys fs2.FS, filePath, targetPath string, up model.UpdateProgress) error {
+ rc, err := fsys.Open(filePath)
+ if err != nil {
+ return err
+ }
+ defer rc.Close()
+ stat, err := rc.Stat()
+ if err != nil {
+ return err
+ }
+ f, err := os.OpenFile(stdpath.Join(targetPath, stat.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ _, err = utils.CopyWithBuffer(f, &stream.ReaderUpdatingProgress{
+ Reader: &stream.SimpleReaderWithSize{
+ Reader: rc,
+ Size: stat.Size(),
+ },
+ UpdateProgress: up,
+ })
+ return err
+}
diff --git a/internal/archive/iso9660/iso9660.go b/internal/archive/iso9660/iso9660.go
new file mode 100644
index 00000000..be107d7b
--- /dev/null
+++ b/internal/archive/iso9660/iso9660.go
@@ -0,0 +1,100 @@
+package iso9660
+
+import (
+ "github.com/alist-org/alist/v3/internal/archive/tool"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/kdomanski/iso9660"
+ "io"
+ "os"
+ stdpath "path"
+)
+
+type ISO9660 struct {
+}
+
+func (ISO9660) AcceptedExtensions() []string {
+ return []string{".iso"}
+}
+
+func (ISO9660) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
+ return map[string]tool.MultipartExtension{}
+}
+
+func (ISO9660) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+ return &model.ArchiveMetaInfo{
+ Comment: "",
+ Encrypted: false,
+ }, nil
+}
+
+func (ISO9660) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+ img, err := getImage(ss[0])
+ if err != nil {
+ return nil, err
+ }
+ dir, err := getObj(img, args.InnerPath)
+ if err != nil {
+ return nil, err
+ }
+ if !dir.IsDir() {
+ return nil, errs.NotFolder
+ }
+ children, err := dir.GetChildren()
+ if err != nil {
+ return nil, err
+ }
+ ret := make([]model.Obj, 0, len(children))
+ for _, child := range children {
+ ret = append(ret, toModelObj(child))
+ }
+ return ret, nil
+}
+
+func (ISO9660) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+ img, err := getImage(ss[0])
+ if err != nil {
+ return nil, 0, err
+ }
+ obj, err := getObj(img, args.InnerPath)
+ if err != nil {
+ return nil, 0, err
+ }
+ if obj.IsDir() {
+ return nil, 0, errs.NotFile
+ }
+ return io.NopCloser(obj.Reader()), obj.Size(), nil
+}
+
+func (ISO9660) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+ img, err := getImage(ss[0])
+ if err != nil {
+ return err
+ }
+ obj, err := getObj(img, args.InnerPath)
+ if err != nil {
+ return err
+ }
+ if obj.IsDir() {
+ if args.InnerPath != "/" {
+ outputPath = stdpath.Join(outputPath, obj.Name())
+ if err = os.MkdirAll(outputPath, 0700); err != nil {
+ return err
+ }
+ }
+ var children []*iso9660.File
+ if children, err = obj.GetChildren(); err == nil {
+ err = decompressAll(children, outputPath)
+ }
+ } else {
+ err = decompress(obj, outputPath, up)
+ }
+ return err
+}
+
+var _ tool.Tool = (*ISO9660)(nil)
+
+func init() {
+ tool.RegisterTool(ISO9660{})
+}
diff --git a/internal/archive/iso9660/utils.go b/internal/archive/iso9660/utils.go
new file mode 100644
index 00000000..0e4cfb1c
--- /dev/null
+++ b/internal/archive/iso9660/utils.go
@@ -0,0 +1,101 @@
+package iso9660
+
+import (
+ "os"
+ stdpath "path"
+ "strings"
+
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/alist-org/alist/v3/pkg/utils"
+ "github.com/kdomanski/iso9660"
+)
+
+func getImage(ss *stream.SeekableStream) (*iso9660.Image, error) {
+ reader, err := stream.NewReadAtSeeker(ss, 0)
+ if err != nil {
+ return nil, err
+ }
+ return iso9660.OpenImage(reader)
+}
+
+func getObj(img *iso9660.Image, path string) (*iso9660.File, error) {
+ obj, err := img.RootDir()
+ if err != nil {
+ return nil, err
+ }
+ if path == "/" {
+ return obj, nil
+ }
+ paths := strings.Split(strings.TrimPrefix(path, "/"), "/")
+ for _, p := range paths {
+ if !obj.IsDir() {
+ return nil, errs.ObjectNotFound
+ }
+ children, err := obj.GetChildren()
+ if err != nil {
+ return nil, err
+ }
+ exist := false
+ for _, child := range children {
+ if child.Name() == p {
+ obj = child
+ exist = true
+ break
+ }
+ }
+ if !exist {
+ return nil, errs.ObjectNotFound
+ }
+ }
+ return obj, nil
+}
+
+func toModelObj(file *iso9660.File) model.Obj {
+ return &model.Object{
+ Name: file.Name(),
+ Size: file.Size(),
+ Modified: file.ModTime(),
+ IsFolder: file.IsDir(),
+ }
+}
+
+func decompress(f *iso9660.File, path string, up model.UpdateProgress) error {
+ file, err := os.OpenFile(stdpath.Join(path, f.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ _, err = utils.CopyWithBuffer(file, &stream.ReaderUpdatingProgress{
+ Reader: &stream.SimpleReaderWithSize{
+ Reader: f.Reader(),
+ Size: f.Size(),
+ },
+ UpdateProgress: up,
+ })
+ return err
+}
+
+func decompressAll(children []*iso9660.File, path string) error {
+ for _, child := range children {
+ if child.IsDir() {
+ nextChildren, err := child.GetChildren()
+ if err != nil {
+ return err
+ }
+ nextPath := stdpath.Join(path, child.Name())
+ if err = os.MkdirAll(nextPath, 0700); err != nil {
+ return err
+ }
+ if err = decompressAll(nextChildren, nextPath); err != nil {
+ return err
+ }
+ } else {
+ if err := decompress(child, path, func(_ float64) {}); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/internal/archive/rardecode/rardecode.go b/internal/archive/rardecode/rardecode.go
new file mode 100644
index 00000000..cd31d1a4
--- /dev/null
+++ b/internal/archive/rardecode/rardecode.go
@@ -0,0 +1,140 @@
+package rardecode
+
+import (
+ "github.com/alist-org/alist/v3/internal/archive/tool"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/nwaples/rardecode/v2"
+ "io"
+ "os"
+ stdpath "path"
+ "strings"
+)
+
+// RarDecoder implements the archive tool interface for RAR archives,
+// backed by the rardecode/v2 library.
+type RarDecoder struct{}
+
+// AcceptedExtensions lists the single-file extensions handled by this tool.
+func (RarDecoder) AcceptedExtensions() []string {
+	return []string{".rar"}
+}
+
+// AcceptedMultipartExtensions maps the first-part extension to the
+// printf-style pattern and starting index of the remaining parts.
+func (RarDecoder) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
+	return map[string]tool.MultipartExtension{
+		".part1.rar": {".part%d.rar", 2},
+	}
+}
+
+// GetMeta lists the archive and builds its directory tree without
+// extracting any data.
+func (RarDecoder) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+	l, err := list(ss, args.Password)
+	if err != nil {
+		return nil, err
+	}
+	// Propagate the encryption flag reported by the traversal instead of
+	// hard-coding false, keeping this consistent with the zip tool.
+	encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(l)
+	return &model.ArchiveMetaInfo{
+		Comment:   "",
+		Encrypted: encrypted,
+		Tree:      tree,
+	}, nil
+}
+
+// List is not supported for RAR; directory listing is served from the
+// full tree returned by GetMeta instead.
+func (RarDecoder) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+	return nil, errs.NotSupport
+}
+
+// Extract scans the archive sequentially for the entry at args.InnerPath
+// and returns a reader positioned at its data along with its unpacked size.
+// Directory entries cannot be extracted and yield ObjectNotFound.
+func (RarDecoder) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+	reader, err := getReader(ss, args.Password)
+	if err != nil {
+		return nil, 0, err
+	}
+	innerPath := strings.TrimPrefix(args.InnerPath, "/")
+	for {
+		var header *rardecode.FileHeader
+		header, err = reader.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, 0, err
+		}
+		if header.Name == innerPath {
+			if header.IsDir {
+				break
+			}
+			// The reader is now positioned at this entry's data; the
+			// NopCloser wrapper is fine because the underlying stream's
+			// lifetime is managed via the stream's closer list.
+			return io.NopCloser(reader), header.UnPackedSize, nil
+		}
+	}
+	return nil, 0, errs.ObjectNotFound
+}
+
+// Decompress extracts entries to outputPath. With InnerPath "/" the whole
+// archive is unpacked; otherwise only the file or directory subtree at
+// InnerPath is extracted (a directory subtree is recreated under its base
+// name inside outputPath).
+func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+	reader, err := getReader(ss, args.Password)
+	if err != nil {
+		return err
+	}
+	if args.InnerPath == "/" {
+		// Full-archive extraction: walk every header in stream order.
+		for {
+			var header *rardecode.FileHeader
+			header, err = reader.Next()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				return err
+			}
+			name := header.Name
+			if header.IsDir {
+				// A trailing slash makes decompress create a directory
+				// (empty base name) instead of a file.
+				name = name + "/"
+			}
+			err = decompress(reader, header, name, outputPath)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		innerPath := strings.TrimPrefix(args.InnerPath, "/")
+		innerBase := stdpath.Base(innerPath)
+		createdBaseDir := false
+		for {
+			var header *rardecode.FileHeader
+			header, err = reader.Next()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				return err
+			}
+			name := header.Name
+			if header.IsDir {
+				name = name + "/"
+			}
+			if name == innerPath {
+				// Exact file match: extract it with progress and stop.
+				err = _decompress(reader, header, outputPath, up)
+				if err != nil {
+					return err
+				}
+				break
+			} else if strings.HasPrefix(name, innerPath+"/") {
+				// Entry inside the requested directory: recreate it under
+				// outputPath/innerBase, creating the base dir lazily.
+				targetPath := stdpath.Join(outputPath, innerBase)
+				if !createdBaseDir {
+					err = os.Mkdir(targetPath, 0700)
+					if err != nil {
+						return err
+					}
+					createdBaseDir = true
+				}
+				restPath := strings.TrimPrefix(name, innerPath+"/")
+				err = decompress(reader, header, restPath, targetPath)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Compile-time check that RarDecoder satisfies the tool interface.
+var _ tool.Tool = (*RarDecoder)(nil)
+
+// init registers the RAR decoder with the global tool registry.
+func init() {
+	tool.RegisterTool(RarDecoder{})
+}
diff --git a/internal/archive/rardecode/utils.go b/internal/archive/rardecode/utils.go
new file mode 100644
index 00000000..5790ec58
--- /dev/null
+++ b/internal/archive/rardecode/utils.go
@@ -0,0 +1,225 @@
+package rardecode
+
+import (
+ "fmt"
+ "github.com/alist-org/alist/v3/internal/archive/tool"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/nwaples/rardecode/v2"
+ "io"
+ "io/fs"
+ "os"
+ stdpath "path"
+ "sort"
+ "strings"
+ "time"
+)
+
+// VolumeFile adapts a seekable stream to fs.File so rardecode can read a
+// single volume of a (possibly multipart) archive. It also serves as its
+// own fs.FileInfo.
+type VolumeFile struct {
+	stream.SStreamReadAtSeeker
+	name string
+}
+
+// Name returns the synthetic volume file name.
+func (v *VolumeFile) Name() string {
+	return v.name
+}
+
+// Size reports the size of the underlying raw stream.
+func (v *VolumeFile) Size() int64 {
+	return v.SStreamReadAtSeeker.GetRawStream().GetSize()
+}
+
+// Mode reports a nominal regular-file permission.
+func (v *VolumeFile) Mode() fs.FileMode {
+	return 0644
+}
+
+// ModTime forwards the raw stream's modification time.
+func (v *VolumeFile) ModTime() time.Time {
+	return v.SStreamReadAtSeeker.GetRawStream().ModTime()
+}
+
+// IsDir is always false: volumes are regular files.
+func (v *VolumeFile) IsDir() bool {
+	return false
+}
+
+// Sys returns no underlying data source.
+func (v *VolumeFile) Sys() any {
+	return nil
+}
+
+// Stat returns the file itself, which doubles as its fs.FileInfo.
+func (v *VolumeFile) Stat() (fs.FileInfo, error) {
+	return v, nil
+}
+
+// Close is a no-op; the wrapped stream's lifetime is managed by the caller.
+func (v *VolumeFile) Close() error {
+	return nil
+}
+
+// VolumeFs is an in-memory fs.FS exposing the rar volume parts by name.
+type VolumeFs struct {
+	parts map[string]*VolumeFile
+}
+
+// Open returns the named volume part, or fs.ErrNotExist when absent.
+func (v *VolumeFs) Open(name string) (fs.File, error) {
+	if file, ok := v.parts[name]; ok {
+		return file, nil
+	}
+	return nil, fs.ErrNotExist
+}
+
+// makeOpts wraps the given stream(s) in an in-memory filesystem and returns
+// the entry file name plus a rardecode option pointing at that filesystem.
+// Multiple streams are exposed as file.partN.rar volumes.
+func makeOpts(ss []*stream.SeekableStream) (string, rardecode.Option, error) {
+	if len(ss) == 1 {
+		reader, err := stream.NewReadAtSeeker(ss[0], 0)
+		if err != nil {
+			return "", nil, err
+		}
+		fileName := "file.rar"
+		fsys := &VolumeFs{parts: map[string]*VolumeFile{
+			fileName: {SStreamReadAtSeeker: reader, name: fileName},
+		}}
+		return fileName, rardecode.FileSystem(fsys), nil
+	} else {
+		parts := make(map[string]*VolumeFile, len(ss))
+		for i, s := range ss {
+			reader, err := stream.NewReadAtSeeker(s, 0)
+			if err != nil {
+				return "", nil, err
+			}
+			// Volume names must follow the .partN.rar convention so
+			// rardecode can locate successive parts.
+			fileName := fmt.Sprintf("file.part%d.rar", i+1)
+			parts[fileName] = &VolumeFile{SStreamReadAtSeeker: reader, name: fileName}
+		}
+		return "file.part1.rar", rardecode.FileSystem(&VolumeFs{parts: parts}), nil
+	}
+}
+
+// WrapReader adapts a listed rar archive to the tool.ArchiveReader
+// interface used by the generic tree builder.
+type WrapReader struct {
+	files []*rardecode.File
+}
+
+// Files returns every archive entry wrapped as a tool.SubFile.
+func (r *WrapReader) Files() []tool.SubFile {
+	ret := make([]tool.SubFile, 0, len(r.files))
+	for _, f := range r.files {
+		ret = append(ret, &WrapFile{File: f})
+	}
+	return ret
+}
+
+// WrapFile adapts a single rardecode.File to tool.SubFile.
+type WrapFile struct {
+	*rardecode.File
+}
+
+// Name returns the entry path; directories get a trailing slash so the
+// tree builder can tell them apart from files.
+func (f *WrapFile) Name() string {
+	if f.File.IsDir {
+		return f.File.Name + "/"
+	}
+	return f.File.Name
+}
+
+// FileInfo exposes the entry's metadata as an fs.FileInfo.
+func (f *WrapFile) FileInfo() fs.FileInfo {
+	return &WrapFileInfo{File: f.File}
+}
+
+// WrapFileInfo adapts a rardecode.File to fs.FileInfo.
+type WrapFileInfo struct {
+	*rardecode.File
+}
+
+// Name returns the base name of the entry.
+func (f *WrapFileInfo) Name() string {
+	return stdpath.Base(f.File.Name)
+}
+
+// Size reports the uncompressed size of the entry.
+func (f *WrapFileInfo) Size() int64 {
+	return f.File.UnPackedSize
+}
+
+// ModTime returns the entry's recorded modification time.
+func (f *WrapFileInfo) ModTime() time.Time {
+	return f.File.ModificationTime
+}
+
+// IsDir reports whether the entry is a directory.
+func (f *WrapFileInfo) IsDir() bool {
+	return f.File.IsDir
+}
+
+// Sys returns no underlying data source.
+func (f *WrapFileInfo) Sys() any {
+	return nil
+}
+
+// list opens the archive and returns its entries wrapped for tree
+// traversal, with parent directories guaranteed to precede their children.
+func list(ss []*stream.SeekableStream, password string) (*WrapReader, error) {
+	fileName, fsOpt, err := makeOpts(ss)
+	if err != nil {
+		return nil, err
+	}
+	opts := []rardecode.Option{fsOpt}
+	if password != "" {
+		opts = append(opts, rardecode.Password(password))
+	}
+	files, err := rardecode.List(fileName, opts...)
+	if err != nil {
+		// Check the error before touching files: on failure the slice is
+		// meaningless, and password errors must be surfaced as such.
+		return nil, filterPassword(err)
+	}
+	// rardecode does not guarantee that parents precede children in the
+	// listing. A parent path is always shorter than any of its child
+	// paths, so sorting by name length puts parents first.
+	sort.Slice(files, func(i, j int) bool {
+		return len(files[i].Name) < len(files[j].Name)
+	})
+	return &WrapReader{files: files}, nil
+}
+
+// getReader opens a sequential rar reader over the given volume(s),
+// optionally decrypting with password. The opened ReadCloser is attached
+// to the first stream's closer list so it is released with the stream.
+func getReader(ss []*stream.SeekableStream, password string) (*rardecode.Reader, error) {
+	fileName, fsOpt, err := makeOpts(ss)
+	if err != nil {
+		return nil, err
+	}
+	opts := []rardecode.Option{fsOpt}
+	if password != "" {
+		opts = append(opts, rardecode.Password(password))
+	}
+	rc, err := rardecode.OpenReader(fileName, opts...)
+	if err != nil {
+		return nil, filterPassword(err)
+	}
+	// Tie the reader's lifetime to the stream so it gets closed with it.
+	ss[0].Closers.Add(rc)
+	return &rc.Reader, nil
+}
+
+// decompress extracts one entry whose archive-relative path is filePath,
+// creating intermediate directories under outputPath. A filePath with an
+// empty base (a directory entry ending in "/") only creates directories.
+func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath, outputPath string) error {
+	targetPath := outputPath
+	dir, base := stdpath.Split(filePath)
+	if dir != "" {
+		targetPath = stdpath.Join(targetPath, dir)
+		err := os.MkdirAll(targetPath, 0700)
+		if err != nil {
+			return err
+		}
+	}
+	if base != "" {
+		// No per-file progress reporting in bulk extraction.
+		err := _decompress(reader, header, targetPath, func(_ float64) {})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// _decompress writes the current entry's data from reader into a new file
+// under targetPath, reporting copy progress through up.
+func _decompress(reader *rardecode.Reader, header *rardecode.FileHeader, targetPath string, up model.UpdateProgress) error {
+	out, err := os.OpenFile(stdpath.Join(targetPath, stdpath.Base(header.Name)), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
+	if err != nil {
+		return err
+	}
+	defer func() { _ = out.Close() }()
+	_, err = io.Copy(out, &stream.ReaderUpdatingProgress{
+		Reader: &stream.SimpleReaderWithSize{
+			Reader: reader,
+			Size:   header.UnPackedSize,
+		},
+		UpdateProgress: up,
+	})
+	return err
+}
+
+// filterPassword maps password-related errors onto the canonical
+// errs.WrongArchivePassword; all other errors pass through unchanged.
+func filterPassword(err error) error {
+	if err == nil || !strings.Contains(err.Error(), "password") {
+		return err
+	}
+	return errs.WrongArchivePassword
+}
diff --git a/internal/archive/sevenzip/sevenzip.go b/internal/archive/sevenzip/sevenzip.go
new file mode 100644
index 00000000..28169966
--- /dev/null
+++ b/internal/archive/sevenzip/sevenzip.go
@@ -0,0 +1,72 @@
+package sevenzip
+
+import (
+ "io"
+ "strings"
+
+ "github.com/alist-org/alist/v3/internal/archive/tool"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+)
+
+// SevenZip implements the archive tool interface for 7z archives,
+// backed by the bodgit/sevenzip library.
+type SevenZip struct{}
+
+// AcceptedExtensions lists the single-file extensions handled by this tool.
+func (SevenZip) AcceptedExtensions() []string {
+	return []string{".7z"}
+}
+
+// AcceptedMultipartExtensions maps the first-part extension to the
+// printf-style pattern and starting index of the remaining parts.
+func (SevenZip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
+	return map[string]tool.MultipartExtension{
+		".7z.001": {".7z.%.3d", 2},
+	}
+}
+
+// GetMeta builds the archive's directory tree. The Encrypted flag mirrors
+// whether a password was supplied, since opening already validated it.
+func (SevenZip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+	reader, err := getReader(ss, args.Password)
+	if err != nil {
+		return nil, err
+	}
+	_, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: reader})
+	return &model.ArchiveMetaInfo{
+		Comment:   "",
+		Encrypted: args.Password != "",
+		Tree:      tree,
+	}, nil
+}
+
+// List is not supported for 7z; directory listing is served from the
+// full tree returned by GetMeta instead.
+func (SevenZip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+	return nil, errs.NotSupport
+}
+
+// Extract opens the archive entry at args.InnerPath and returns its
+// reader together with its uncompressed size.
+func (SevenZip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+	reader, err := getReader(ss, args.Password)
+	if err != nil {
+		return nil, 0, err
+	}
+	target := strings.TrimPrefix(args.InnerPath, "/")
+	for _, file := range reader.File {
+		if file.Name != target {
+			continue
+		}
+		rc, err := file.Open()
+		if err != nil {
+			return nil, 0, err
+		}
+		return rc, file.FileInfo().Size(), nil
+	}
+	return nil, 0, errs.ObjectNotFound
+}
+
+// Decompress extracts the file or subtree at args.InnerPath into
+// outputPath using the generic folder-traversal helper.
+func (SevenZip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+	reader, err := getReader(ss, args.Password)
+	if err != nil {
+		return err
+	}
+	return tool.DecompressFromFolderTraversal(&WrapReader{Reader: reader}, outputPath, args, up)
+}
+
+// Compile-time check that SevenZip satisfies the tool interface.
+var _ tool.Tool = (*SevenZip)(nil)
+
+// init registers the 7z decoder with the global tool registry.
+func init() {
+	tool.RegisterTool(SevenZip{})
+}
diff --git a/internal/archive/sevenzip/utils.go b/internal/archive/sevenzip/utils.go
new file mode 100644
index 00000000..624ba187
--- /dev/null
+++ b/internal/archive/sevenzip/utils.go
@@ -0,0 +1,61 @@
+package sevenzip
+
+import (
+ "errors"
+ "github.com/alist-org/alist/v3/internal/archive/tool"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/bodgit/sevenzip"
+ "io"
+ "io/fs"
+)
+
+// WrapReader adapts a sevenzip.Reader to the tool.ArchiveReader interface
+// used by the generic tree builder and decompressor.
+type WrapReader struct {
+	Reader *sevenzip.Reader
+}
+
+// Files returns every archive entry wrapped as a tool.SubFile.
+func (r *WrapReader) Files() []tool.SubFile {
+	ret := make([]tool.SubFile, 0, len(r.Reader.File))
+	for _, f := range r.Reader.File {
+		ret = append(ret, &WrapFile{f: f})
+	}
+	return ret
+}
+
+// WrapFile adapts a single sevenzip.File to tool.SubFile.
+type WrapFile struct {
+	f *sevenzip.File
+}
+
+// Name returns the entry's path inside the archive.
+func (f *WrapFile) Name() string {
+	return f.f.Name
+}
+
+// FileInfo forwards the entry's metadata.
+func (f *WrapFile) FileInfo() fs.FileInfo {
+	return f.f.FileInfo()
+}
+
+// Open returns a reader over the entry's decompressed content.
+func (f *WrapFile) Open() (io.ReadCloser, error) {
+	return f.f.Open()
+}
+
+// getReader opens a random-access 7z reader over the concatenated
+// stream parts, translating decryption failures into a password error.
+func getReader(ss []*stream.SeekableStream, password string) (*sevenzip.Reader, error) {
+	readerAt, err := stream.NewMultiReaderAt(ss)
+	if err != nil {
+		return nil, err
+	}
+	sr, err := sevenzip.NewReaderWithPassword(readerAt, readerAt.Size(), password)
+	if err != nil {
+		return nil, filterPassword(err)
+	}
+	return sr, nil
+}
+
+// filterPassword converts sevenzip decryption failures into the canonical
+// errs.WrongArchivePassword; other errors pass through unchanged.
+func filterPassword(err error) error {
+	var readErr *sevenzip.ReadError
+	if err != nil && errors.As(err, &readErr) && readErr.Encrypted {
+		return errs.WrongArchivePassword
+	}
+	return err
+}
diff --git a/internal/archive/tool/base.go b/internal/archive/tool/base.go
new file mode 100644
index 00000000..8f5b10d9
--- /dev/null
+++ b/internal/archive/tool/base.go
@@ -0,0 +1,21 @@
+package tool
+
+import (
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "io"
+)
+
+// MultipartExtension describes how follow-up parts of a multipart archive
+// are named: a printf-style pattern and the index of the second part.
+type MultipartExtension struct {
+	PartFileFormat  string
+	SecondPartIndex int
+}
+
+// Tool is the common interface every archive format handler implements:
+// metadata/tree building, optional direct listing, single-entry
+// extraction, and full decompression to disk.
+type Tool interface {
+	AcceptedExtensions() []string
+	AcceptedMultipartExtensions() map[string]MultipartExtension
+	GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error)
+	List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error)
+	Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error)
+	Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error
+}
diff --git a/internal/archive/tool/helper.go b/internal/archive/tool/helper.go
new file mode 100644
index 00000000..20da3446
--- /dev/null
+++ b/internal/archive/tool/helper.go
@@ -0,0 +1,204 @@
+package tool
+
+import (
+ "io"
+ "io/fs"
+ "os"
+ stdpath "path"
+ "strings"
+
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+)
+
+// SubFile is one entry inside an archive: a named file or directory whose
+// content can be opened for reading.
+type SubFile interface {
+	Name() string
+	FileInfo() fs.FileInfo
+	Open() (io.ReadCloser, error)
+}
+
+// CanEncryptSubFile is implemented by entries that may be individually
+// encrypted and accept a password before opening.
+type CanEncryptSubFile interface {
+	IsEncrypted() bool
+	SetPassword(password string)
+}
+
+// ArchiveReader provides flat access to every entry of an archive.
+type ArchiveReader interface {
+	Files() []SubFile
+}
+
+// GenerateMetaTreeFromFolderTraversal builds a directory tree from a flat
+// entry listing. It returns whether any entry reported itself encrypted,
+// and the children of the virtual root. Entries may arrive in any order;
+// missing intermediate folders are synthesized on demand.
+func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree) {
+	encrypted := false
+	// dirMap maps a directory path (or "." for the root) to its tree node.
+	dirMap := make(map[string]*model.ObjectTree)
+	for _, file := range r.Files() {
+		if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() {
+			encrypted = true
+		}
+
+		name := strings.TrimPrefix(file.Name(), "/")
+		var dir string
+		var dirObj *model.ObjectTree
+		isNewFolder := false
+		if !file.FileInfo().IsDir() {
+			// First attach the file to its containing folder,
+			// creating a placeholder node for that folder if needed.
+			dir = stdpath.Dir(name)
+			dirObj = dirMap[dir]
+			if dirObj == nil {
+				isNewFolder = dir != "."
+				dirObj = &model.ObjectTree{}
+				dirObj.IsFolder = true
+				dirObj.Name = stdpath.Base(dir)
+				dirObj.Modified = file.FileInfo().ModTime()
+				dirMap[dir] = dirObj
+			}
+			dirObj.Children = append(
+				dirObj.Children, &model.ObjectTree{
+					Object: *MakeModelObj(file.FileInfo()),
+				},
+			)
+		} else {
+			// Directory entry: reuse the placeholder if a child created it
+			// earlier, then fill in the real metadata.
+			dir = strings.TrimSuffix(name, "/")
+			dirObj = dirMap[dir]
+			if dirObj == nil {
+				isNewFolder = dir != "."
+				dirObj = &model.ObjectTree{}
+				dirMap[dir] = dirObj
+			}
+			dirObj.IsFolder = true
+			dirObj.Name = stdpath.Base(dir)
+			dirObj.Modified = file.FileInfo().ModTime()
+		}
+		if isNewFolder {
+			// Attach the new folder to its parent. Some archives record
+			// only file paths and no folder entries, so walk upward and
+			// create every missing ancestor folder.
+			parentDir := stdpath.Dir(dir)
+			for {
+				parentDirObj := dirMap[parentDir]
+				if parentDirObj == nil {
+					parentDirObj = &model.ObjectTree{}
+					if parentDir != "." {
+						parentDirObj.IsFolder = true
+						parentDirObj.Name = stdpath.Base(parentDir)
+						parentDirObj.Modified = file.FileInfo().ModTime()
+					}
+					dirMap[parentDir] = parentDirObj
+				}
+				parentDirObj.Children = append(parentDirObj.Children, dirObj)
+
+				parentDir = stdpath.Dir(parentDir)
+				// Stop once we reach an ancestor that already existed.
+				if dirMap[parentDir] != nil {
+					break
+				}
+				dirObj = parentDirObj
+			}
+		}
+	}
+	if len(dirMap) > 0 {
+		return encrypted, dirMap["."].GetChildren()
+	} else {
+		return encrypted, nil
+	}
+}
+
+// MakeModelObj maps an os.FileInfo onto the generic model.Object.
+func MakeModelObj(file os.FileInfo) *model.Object {
+	obj := &model.Object{}
+	obj.Name = file.Name()
+	obj.Size = file.Size()
+	obj.Modified = file.ModTime()
+	obj.IsFolder = file.IsDir()
+	return obj
+}
+
+// WrapFileInfo pairs a model.Obj with the fs.FileInfo-style accessors the
+// archive helpers expect.
+type WrapFileInfo struct {
+	model.Obj
+}
+
+// DecompressFromFolderTraversal extracts entries from r into outputPath.
+// With InnerPath "/" the whole archive is unpacked and progress counts
+// whole entries; otherwise only the file or directory subtree at
+// InnerPath is extracted (a directory is recreated under its base name).
+func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+	var err error
+	files := r.Files()
+	if args.InnerPath == "/" {
+		for i, file := range files {
+			name := file.Name()
+			err = decompress(file, name, outputPath, args.Password)
+			if err != nil {
+				return err
+			}
+			// Coarse progress: fraction of entries processed.
+			up(float64(i+1) * 100.0 / float64(len(files)))
+		}
+	} else {
+		innerPath := strings.TrimPrefix(args.InnerPath, "/")
+		innerBase := stdpath.Base(innerPath)
+		createdBaseDir := false
+		for _, file := range files {
+			name := file.Name()
+			if name == innerPath {
+				// Exact file match: extract it with progress and stop.
+				err = _decompress(file, outputPath, args.Password, up)
+				if err != nil {
+					return err
+				}
+				break
+			} else if strings.HasPrefix(name, innerPath+"/") {
+				// Entry inside the requested directory: recreate it under
+				// outputPath/innerBase, creating the base dir lazily.
+				targetPath := stdpath.Join(outputPath, innerBase)
+				if !createdBaseDir {
+					err = os.Mkdir(targetPath, 0700)
+					if err != nil {
+						return err
+					}
+					createdBaseDir = true
+				}
+				restPath := strings.TrimPrefix(name, innerPath+"/")
+				err = decompress(file, restPath, targetPath, args.Password)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// decompress extracts one entry whose archive-relative path is filePath,
+// creating intermediate directories under outputPath. A filePath with an
+// empty base (a directory entry ending in "/") only creates directories.
+func decompress(file SubFile, filePath, outputPath, password string) error {
+	targetPath := outputPath
+	dir, base := stdpath.Split(filePath)
+	if dir != "" {
+		targetPath = stdpath.Join(targetPath, dir)
+		err := os.MkdirAll(targetPath, 0700)
+		if err != nil {
+			return err
+		}
+	}
+	if base != "" {
+		// No per-file progress reporting in bulk extraction.
+		err := _decompress(file, targetPath, password, func(_ float64) {})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// _decompress writes a single archive entry into targetPath, decrypting
+// with password when the entry supports it, and reports copy progress
+// through up.
+func _decompress(file SubFile, targetPath, password string, up model.UpdateProgress) error {
+	if enc, ok := file.(CanEncryptSubFile); ok && enc.IsEncrypted() {
+		enc.SetPassword(password)
+	}
+	rc, err := file.Open()
+	if err != nil {
+		return err
+	}
+	defer func() { _ = rc.Close() }()
+	out, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
+	if err != nil {
+		return err
+	}
+	defer func() { _ = out.Close() }()
+	_, err = io.Copy(out, &stream.ReaderUpdatingProgress{
+		Reader: &stream.SimpleReaderWithSize{
+			Reader: rc,
+			Size:   file.FileInfo().Size(),
+		},
+		UpdateProgress: up,
+	})
+	return err
+}
diff --git a/internal/archive/tool/utils.go b/internal/archive/tool/utils.go
new file mode 100644
index 00000000..aa92cb1d
--- /dev/null
+++ b/internal/archive/tool/utils.go
@@ -0,0 +1,32 @@
+package tool
+
+import (
+ "github.com/alist-org/alist/v3/internal/errs"
+)
+
+var (
+	// Tools maps an archive extension to its registered handler.
+	Tools = make(map[string]Tool)
+	// MultipartExtensions maps a first-part extension to the naming rule
+	// for the remaining parts.
+	MultipartExtensions = make(map[string]MultipartExtension)
+)
+
+// RegisterTool registers tool under every extension it accepts, including
+// the leading part of each multipart naming scheme.
+func RegisterTool(tool Tool) {
+	for _, ext := range tool.AcceptedExtensions() {
+		Tools[ext] = tool
+	}
+	for mainFile, ext := range tool.AcceptedMultipartExtensions() {
+		MultipartExtensions[mainFile] = ext
+		Tools[mainFile] = tool
+	}
+}
+
+// GetArchiveTool resolves ext to a registered tool. The returned
+// MultipartExtension is non-nil only for multipart archive formats.
+func GetArchiveTool(ext string) (*MultipartExtension, Tool, error) {
+	t, ok := Tools[ext]
+	if !ok {
+		return nil, nil, errs.UnknownArchiveFormat
+	}
+	if partExt, ok := MultipartExtensions[ext]; ok {
+		return &partExt, t, nil
+	}
+	return nil, t, nil
+}
diff --git a/internal/archive/zip/utils.go b/internal/archive/zip/utils.go
new file mode 100644
index 00000000..59f4ed51
--- /dev/null
+++ b/internal/archive/zip/utils.go
@@ -0,0 +1,195 @@
+package zip
+
+import (
+ "bytes"
+ "io"
+ "io/fs"
+ stdpath "path"
+ "strings"
+
+ "github.com/alist-org/alist/v3/internal/archive/tool"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/saintfish/chardet"
+ "github.com/yeka/zip"
+ "golang.org/x/text/encoding"
+ "golang.org/x/text/encoding/charmap"
+ "golang.org/x/text/encoding/japanese"
+ "golang.org/x/text/encoding/korean"
+ "golang.org/x/text/encoding/simplifiedchinese"
+ "golang.org/x/text/encoding/traditionalchinese"
+ "golang.org/x/text/encoding/unicode"
+ "golang.org/x/text/encoding/unicode/utf32"
+ "golang.org/x/text/transform"
+)
+
+// WrapReader adapts a zip.Reader to the tool.ArchiveReader interface used
+// by the generic tree builder and decompressor.
+type WrapReader struct {
+	Reader *zip.Reader
+}
+
+// Files returns every archive entry wrapped as a tool.SubFile.
+func (r *WrapReader) Files() []tool.SubFile {
+	ret := make([]tool.SubFile, 0, len(r.Reader.File))
+	for _, f := range r.Reader.File {
+		ret = append(ret, &WrapFile{f: f})
+	}
+	return ret
+}
+
+// WrapFileInfo decodes the possibly non-UTF-8 entry name on access.
+type WrapFileInfo struct {
+	fs.FileInfo
+}
+
+// Name returns the charset-decoded base name of the entry.
+func (f *WrapFileInfo) Name() string {
+	return decodeName(f.FileInfo.Name())
+}
+
+// WrapFile adapts a single zip.File to tool.SubFile and exposes its
+// per-entry encryption controls.
+type WrapFile struct {
+	f *zip.File
+}
+
+// Name returns the charset-decoded entry path.
+func (f *WrapFile) Name() string {
+	return decodeName(f.f.Name)
+}
+
+// FileInfo exposes the entry metadata with a decoded name.
+func (f *WrapFile) FileInfo() fs.FileInfo {
+	return &WrapFileInfo{FileInfo: f.f.FileInfo()}
+}
+
+// Open returns a reader over the entry's decompressed content.
+func (f *WrapFile) Open() (io.ReadCloser, error) {
+	return f.f.Open()
+}
+
+// IsEncrypted reports whether this entry is password protected.
+func (f *WrapFile) IsEncrypted() bool {
+	return f.f.IsEncrypted()
+}
+
+// SetPassword sets the password used when the entry is opened.
+func (f *WrapFile) SetPassword(password string) {
+	f.f.SetPassword(password)
+}
+
+// getReader opens a random-access zip reader over the concatenated
+// stream parts. For .z01-style split archives the first stream (the .zip)
+// is moved to the end, since it holds the central directory.
+func getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
+	if len(ss) > 1 && stdpath.Ext(ss[1].GetName()) == ".z01" {
+		// FIXME: Incorrect parsing method for standard multipart zip format
+		ss = append(ss[1:], ss[0])
+	}
+	reader, err := stream.NewMultiReaderAt(ss)
+	if err != nil {
+		return nil, err
+	}
+	return zip.NewReader(reader, reader.Size())
+}
+
+// filterPassword maps password-related errors onto the canonical
+// errs.WrongArchivePassword; all other errors pass through unchanged.
+func filterPassword(err error) error {
+	if err == nil || !strings.Contains(err.Error(), "password") {
+		return err
+	}
+	return errs.WrongArchivePassword
+}
+
+// decodeName converts a raw zip entry name to UTF-8. It auto-detects the
+// charset, preferring a common encoding with confidence above 30 and
+// falling back to the first recognized candidate; on any failure the name
+// is returned unchanged.
+func decodeName(name string) string {
+	b := []byte(name)
+	detector := chardet.NewTextDetector()
+	results, err := detector.DetectAll(b)
+	if err != nil {
+		return name
+	}
+	// ce: first common encoding with acceptable confidence;
+	// re: first recognized encoding of any kind (fallback).
+	var ce, re, enc encoding.Encoding
+	for _, r := range results {
+		if r.Confidence > 30 {
+			ce = getCommonEncoding(r.Charset)
+			if ce != nil {
+				break
+			}
+		}
+		if re == nil {
+			re = getEncoding(r.Charset)
+		}
+	}
+	if ce != nil {
+		enc = ce
+	} else if re != nil {
+		enc = re
+	} else {
+		return name
+	}
+	i := bytes.NewReader(b)
+	decoder := transform.NewReader(i, enc.NewDecoder())
+	// Decode errors are ignored; a partial result is still returned.
+	content, _ := io.ReadAll(decoder)
+	return string(content)
+}
+
+// getCommonEncoding maps a detected charset name to its decoder, accepting
+// only widely used encodings; unknown names yield nil.
+func getCommonEncoding(name string) encoding.Encoding {
+	switch name {
+	case "UTF-8":
+		return unicode.UTF8
+	case "UTF-16LE":
+		return unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
+	case "Shift_JIS":
+		return japanese.ShiftJIS
+	case "GB-18030":
+		return simplifiedchinese.GB18030
+	case "EUC-KR":
+		return korean.EUCKR
+	case "Big5":
+		return traditionalchinese.Big5
+	}
+	return nil
+}
+
+// getEncoding maps any supported detected charset name to its decoder;
+// unknown names yield nil.
+func getEncoding(name string) encoding.Encoding {
+	switch name {
+	case "UTF-8":
+		return unicode.UTF8
+	case "UTF-16BE":
+		return unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM)
+	case "UTF-16LE":
+		return unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
+	case "UTF-32BE":
+		return utf32.UTF32(utf32.BigEndian, utf32.IgnoreBOM)
+	case "UTF-32LE":
+		return utf32.UTF32(utf32.LittleEndian, utf32.IgnoreBOM)
+	case "ISO-8859-1":
+		return charmap.ISO8859_1
+	case "ISO-8859-2":
+		return charmap.ISO8859_2
+	case "ISO-8859-3":
+		return charmap.ISO8859_3
+	case "ISO-8859-4":
+		return charmap.ISO8859_4
+	case "ISO-8859-5":
+		return charmap.ISO8859_5
+	case "ISO-8859-6":
+		return charmap.ISO8859_6
+	case "ISO-8859-7":
+		return charmap.ISO8859_7
+	case "ISO-8859-8":
+		return charmap.ISO8859_8
+	case "ISO-8859-8-I":
+		return charmap.ISO8859_8I
+	case "ISO-8859-9":
+		return charmap.ISO8859_9
+	case "windows-1251":
+		return charmap.Windows1251
+	case "windows-1256":
+		return charmap.Windows1256
+	case "KOI8-R":
+		return charmap.KOI8R
+	case "Shift_JIS":
+		return japanese.ShiftJIS
+	case "GB-18030":
+		return simplifiedchinese.GB18030
+	case "EUC-JP":
+		return japanese.EUCJP
+	case "EUC-KR":
+		return korean.EUCKR
+	case "Big5":
+		return traditionalchinese.Big5
+	case "ISO-2022-JP":
+		return japanese.ISO2022JP
+	}
+	return nil
+}
diff --git a/internal/archive/zip/zip.go b/internal/archive/zip/zip.go
new file mode 100644
index 00000000..6e23570c
--- /dev/null
+++ b/internal/archive/zip/zip.go
@@ -0,0 +1,132 @@
+package zip
+
+import (
+ "io"
+ stdpath "path"
+ "strings"
+
+ "github.com/alist-org/alist/v3/internal/archive/tool"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+)
+
+// Zip implements the archive tool interface for (possibly split) zip
+// archives, backed by the yeka/zip library with per-entry encryption.
+type Zip struct {
+}
+
+// AcceptedExtensions is empty: plain .zip is covered by the multipart map.
+func (Zip) AcceptedExtensions() []string {
+	return []string{}
+}
+
+// AcceptedMultipartExtensions maps each first-part extension to the
+// printf-style pattern and starting index of the remaining parts.
+func (Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
+	return map[string]tool.MultipartExtension{
+		".zip":     {".z%.2d", 1},
+		".zip.001": {".zip.%.3d", 2},
+	}
+}
+
+// GetMeta builds the archive's directory tree and reports whether any
+// entry is encrypted, along with the archive comment.
+func (Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+	zipReader, err := getReader(ss)
+	if err != nil {
+		return nil, err
+	}
+	encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: zipReader})
+	return &model.ArchiveMetaInfo{
+		Comment:   zipReader.Comment,
+		Encrypted: encrypted,
+		Tree:      tree,
+	}, nil
+}
+
+// List returns the direct children of args.InnerPath. At the root it also
+// verifies the password against the first encrypted entry, and synthesizes
+// a top-level folder when the archive stores no root-level entries.
+func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+	zipReader, err := getReader(ss)
+	if err != nil {
+		return nil, err
+	}
+	if args.InnerPath == "/" {
+		ret := make([]model.Obj, 0)
+		passVerified := false
+		var dir *model.Object
+		for _, file := range zipReader.File {
+			// Verify the password once, on the first encrypted entry.
+			if !passVerified && file.IsEncrypted() {
+				file.SetPassword(args.Password)
+				rc, e := file.Open()
+				if e != nil {
+					return nil, filterPassword(e)
+				}
+				_ = rc.Close()
+				passVerified = true
+			}
+			name := strings.TrimSuffix(decodeName(file.Name), "/")
+			if strings.Contains(name, "/") {
+				// Some archives omit an explicit entry for the top-level
+				// folder; remember the first depth-one parent as a
+				// synthetic root child.
+				strs := strings.Split(name, "/")
+				if dir == nil && len(strs) == 2 {
+					dir = &model.Object{
+						Name:     strs[0],
+						Modified: ss[0].ModTime(),
+						IsFolder: true,
+					}
+				}
+				continue
+			}
+			ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()}))
+		}
+		if len(ret) == 0 && dir != nil {
+			ret = append(ret, dir)
+		}
+		return ret, nil
+	} else {
+		innerPath := strings.TrimPrefix(args.InnerPath, "/") + "/"
+		ret := make([]model.Obj, 0)
+		exist := false
+		for _, file := range zipReader.File {
+			name := decodeName(file.Name)
+			dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/"
+			if dir != innerPath {
+				continue
+			}
+			exist = true
+			ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo()}))
+		}
+		if !exist {
+			return nil, errs.ObjectNotFound
+		}
+		return ret, nil
+	}
+}
+
+// Extract returns a reader for the entry at args.InnerPath, applying
+// args.Password when the entry is encrypted.
+func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+	zipReader, err := getReader(ss)
+	if err != nil {
+		return nil, 0, err
+	}
+	target := strings.TrimPrefix(args.InnerPath, "/")
+	for _, file := range zipReader.File {
+		if decodeName(file.Name) != target {
+			continue
+		}
+		if file.IsEncrypted() {
+			file.SetPassword(args.Password)
+		}
+		rc, err := file.Open()
+		if err != nil {
+			return nil, 0, err
+		}
+		return rc, file.FileInfo().Size(), nil
+	}
+	return nil, 0, errs.ObjectNotFound
+}
+
+// Decompress extracts the file or subtree at args.InnerPath into
+// outputPath using the generic folder-traversal helper.
+func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+	zipReader, err := getReader(ss)
+	if err != nil {
+		return err
+	}
+	return tool.DecompressFromFolderTraversal(&WrapReader{Reader: zipReader}, outputPath, args, up)
+}
+
+// Compile-time check that Zip satisfies the tool interface.
+var _ tool.Tool = (*Zip)(nil)
+
+// init registers the zip decoder with the global tool registry.
+func init() {
+	tool.RegisterTool(Zip{})
+}
diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go
index 27174c23..db3e2094 100644
--- a/internal/bootstrap/config.go
+++ b/internal/bootstrap/config.go
@@ -9,6 +9,7 @@ import (
"github.com/alist-org/alist/v3/cmd/flags"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/net"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/caarlos0/env/v9"
log "github.com/sirupsen/logrus"
@@ -34,6 +35,8 @@ func InitConfig() {
log.Fatalf("failed to create config file: %+v", err)
}
conf.Conf = conf.DefaultConfig()
+ LastLaunchedVersion = conf.Version
+ conf.Conf.LastLaunchedVersion = conf.Version
if !utils.WriteJsonToFile(configPath, conf.Conf) {
log.Fatalf("failed to create default config file")
}
@@ -47,6 +50,10 @@ func InitConfig() {
if err != nil {
log.Fatalf("load config error: %+v", err)
}
+ LastLaunchedVersion = conf.Conf.LastLaunchedVersion
+ if strings.HasPrefix(conf.Version, "v") || LastLaunchedVersion == "" {
+ conf.Conf.LastLaunchedVersion = conf.Version
+ }
// update config.json struct
confBody, err := utils.Json.MarshalIndent(conf.Conf, "", " ")
if err != nil {
@@ -57,6 +64,9 @@ func InitConfig() {
log.Fatalf("update config struct error: %+v", err)
}
}
+ if conf.Conf.MaxConcurrency > 0 {
+ net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency}
+ }
if !conf.Conf.Force {
confFromEnv()
}
diff --git a/internal/bootstrap/data/setting.go b/internal/bootstrap/data/setting.go
index 920a7a2d..407a5c64 100644
--- a/internal/bootstrap/data/setting.go
+++ b/internal/bootstrap/data/setting.go
@@ -1,8 +1,11 @@
package data
import (
+ "strconv"
+
"github.com/alist-org/alist/v3/cmd/flags"
"github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/db"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/offline_download/tool"
"github.com/alist-org/alist/v3/internal/op"
@@ -21,17 +24,19 @@ func initSettings() {
if err != nil {
utils.Log.Fatalf("failed get settings: %+v", err)
}
- for i := range settings {
- if !isActive(settings[i].Key) && settings[i].Flag != model.DEPRECATED {
- settings[i].Flag = model.DEPRECATED
- err = op.SaveSettingItem(&settings[i])
+ settingMap := map[string]*model.SettingItem{}
+ for _, v := range settings {
+ if !isActive(v.Key) && v.Flag != model.DEPRECATED {
+ v.Flag = model.DEPRECATED
+ err = op.SaveSettingItem(&v)
if err != nil {
utils.Log.Fatalf("failed save setting: %+v", err)
}
}
+ settingMap[v.Key] = &v
}
-
// create or save setting
+ save := false
for i := range initialSettingItems {
item := &initialSettingItems[i]
item.Index = uint(i)
@@ -39,26 +44,33 @@ func initSettings() {
item.PreDefault = item.Value
}
// err
- stored, err := op.GetSettingItemByKey(item.Key)
- if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
- utils.Log.Fatalf("failed get setting: %+v", err)
- continue
+ stored, ok := settingMap[item.Key]
+ if !ok {
+ stored, err = op.GetSettingItemByKey(item.Key)
+ if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
+ utils.Log.Fatalf("failed get setting: %+v", err)
+ continue
+ }
}
- // save
if stored != nil && item.Key != conf.VERSION && stored.Value != item.PreDefault {
item.Value = stored.Value
}
+ _, err = op.HandleSettingItemHook(item)
+ if err != nil {
+ utils.Log.Errorf("failed to execute hook on %s: %+v", item.Key, err)
+ continue
+ }
+ // save
if stored == nil || *item != *stored {
- err = op.SaveSettingItem(item)
- if err != nil {
- utils.Log.Fatalf("failed save setting: %+v", err)
- }
+ save = true
+ }
+ }
+ if save {
+ err = db.SaveSettingItems(initialSettingItems)
+ if err != nil {
+ utils.Log.Fatalf("failed save setting: %+v", err)
} else {
- // Not save so needs to execute hook
- _, err = op.HandleSettingItemHook(item)
- if err != nil {
- utils.Log.Errorf("failed to execute hook on %s: %+v", item.Key, err)
- }
+ op.SettingCacheUpdate()
}
}
}
@@ -104,7 +116,7 @@ func InitialSettings() []model.SettingItem {
{Key: conf.VideoTypes, Value: "mp4,mkv,avi,mov,rmvb,webm,flv,m3u8", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
{Key: conf.ImageTypes, Value: "jpg,tiff,jpeg,png,gif,bmp,svg,ico,swf,webp", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
//{Key: conf.OfficeTypes, Value: "doc,docx,xls,xlsx,ppt,pptx", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
- {Key: conf.ProxyTypes, Value: "m3u8", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
+ {Key: conf.ProxyTypes, Value: "m3u8,url", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
{Key: conf.ProxyIgnoreHeaders, Value: "authorization,referer", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
{Key: "external_previews", Value: `{}`, Type: conf.TypeText, Group: model.PREVIEW},
{Key: "iframe_previews", Value: `{
@@ -129,6 +141,9 @@ func InitialSettings() []model.SettingItem {
{Key: "audio_cover", Value: "https://jsd.nn.ci/gh/alist-org/logo@main/logo.svg", Type: conf.TypeString, Group: model.PREVIEW},
{Key: conf.AudioAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
{Key: conf.VideoAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
+ {Key: conf.PreviewArchivesByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
+ {Key: conf.ReadMeAutoRender, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
+ {Key: conf.FilterReadMeScripts, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
// global settings
{Key: conf.HideFiles, Value: "/\\/README.md/i", Type: conf.TypeText, Group: model.GLOBAL},
{Key: "package_download", Value: "true", Type: conf.TypeBool, Group: model.GLOBAL},
@@ -164,6 +179,7 @@ func InitialSettings() []model.SettingItem {
{Key: conf.SSOApplicationName, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE},
{Key: conf.SSOEndpointName, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE},
{Key: conf.SSOJwtPublicKey, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE},
+ {Key: conf.SSOExtraScopes, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE},
{Key: conf.SSOAutoRegister, Value: "false", Type: conf.TypeBool, Group: model.SSO, Flag: model.PRIVATE},
{Key: conf.SSODefaultDir, Value: "/", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE},
{Key: conf.SSODefaultPermission, Value: "0", Type: conf.TypeNumber, Group: model.SSO, Flag: model.PRIVATE},
@@ -180,10 +196,32 @@ func InitialSettings() []model.SettingItem {
{Key: conf.LdapDefaultPermission, Value: "0", Type: conf.TypeNumber, Group: model.LDAP, Flag: model.PRIVATE},
{Key: conf.LdapLoginTips, Value: "login with ldap", Type: conf.TypeString, Group: model.LDAP, Flag: model.PUBLIC},
- //s3 settings
+ // s3 settings
{Key: conf.S3AccessKeyId, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
{Key: conf.S3SecretAccessKey, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
{Key: conf.S3Buckets, Value: "[]", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
+
+ // ftp settings
+ {Key: conf.FTPPublicHost, Value: "127.0.0.1", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
+ {Key: conf.FTPPasvPortMap, Value: "", Type: conf.TypeText, Group: model.FTP, Flag: model.PRIVATE},
+ {Key: conf.FTPProxyUserAgent, Value: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " +
+ "Chrome/87.0.4280.88 Safari/537.36", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
+ {Key: conf.FTPMandatoryTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE},
+ {Key: conf.FTPImplicitTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE},
+ {Key: conf.FTPTLSPrivateKeyPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
+ {Key: conf.FTPTLSPublicCertPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
+
+ // traffic settings
+ {Key: conf.TaskOfflineDownloadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Download.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+ {Key: conf.TaskOfflineDownloadTransferThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Transfer.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+ {Key: conf.TaskUploadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Upload.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+ {Key: conf.TaskCopyThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Copy.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+ {Key: conf.TaskDecompressDownloadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Decompress.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+ {Key: conf.TaskDecompressUploadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.DecompressUpload.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+ {Key: conf.StreamMaxClientDownloadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+ {Key: conf.StreamMaxClientUploadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+ {Key: conf.StreamMaxServerDownloadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+ {Key: conf.StreamMaxServerUploadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
}
initialSettingItems = append(initialSettingItems, tool.Tools.Items()...)
if flags.Dev {
diff --git a/internal/bootstrap/data/user.go b/internal/bootstrap/data/user.go
index 3b71e498..9c3f8962 100644
--- a/internal/bootstrap/data/user.go
+++ b/internal/bootstrap/data/user.go
@@ -32,6 +32,8 @@ func initUser() {
Role: model.ADMIN,
BasePath: "/",
Authn: "[]",
+ // 0(can see hidden) - 7(can remove) & 12(can read archives) - 13(can decompress archives)
+ Permission: 0x30FF,
}
if err := op.CreateUser(admin); err != nil {
panic(err)
@@ -63,39 +65,4 @@ func initUser() {
utils.Log.Fatalf("[init user] Failed to get guest user: %v", err)
}
}
- hashPwdForOldVersion()
- updateAuthnForOldVersion()
-}
-
-func hashPwdForOldVersion() {
- users, _, err := op.GetUsers(1, -1)
- if err != nil {
- utils.Log.Fatalf("[hash pwd for old version] failed get users: %v", err)
- }
- for i := range users {
- user := users[i]
- if user.PwdHash == "" {
- user.SetPassword(user.Password)
- user.Password = ""
- if err := db.UpdateUser(&user); err != nil {
- utils.Log.Fatalf("[hash pwd for old version] failed update user: %v", err)
- }
- }
- }
-}
-
-func updateAuthnForOldVersion() {
- users, _, err := op.GetUsers(1, -1)
- if err != nil {
- utils.Log.Fatalf("[update authn for old version] failed get users: %v", err)
- }
- for i := range users {
- user := users[i]
- if user.Authn == "" {
- user.Authn = "[]"
- if err := db.UpdateUser(&user); err != nil {
- utils.Log.Fatalf("[update authn for old version] failed update user: %v", err)
- }
- }
- }
}
diff --git a/internal/bootstrap/db.go b/internal/bootstrap/db.go
index 5dfa2820..5f5f6fce 100644
--- a/internal/bootstrap/db.go
+++ b/internal/bootstrap/db.go
@@ -56,20 +56,25 @@ func InitDB() {
}
case "mysql":
{
- //[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
- dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local&tls=%s",
- database.User, database.Password, database.Host, database.Port, database.Name, database.SSLMode)
- if database.DSN != "" {
- dsn = database.DSN
+ dsn := database.DSN
+ if dsn == "" {
+ //[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
+ dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local&tls=%s",
+ database.User, database.Password, database.Host, database.Port, database.Name, database.SSLMode)
}
dB, err = gorm.Open(mysql.Open(dsn), gormConfig)
}
case "postgres":
{
- dsn := fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai",
- database.Host, database.User, database.Password, database.Name, database.Port, database.SSLMode)
- if database.DSN != "" {
- dsn = database.DSN
+ dsn := database.DSN
+ if dsn == "" {
+ if database.Password != "" {
+ dsn = fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai",
+ database.Host, database.User, database.Password, database.Name, database.Port, database.SSLMode)
+ } else {
+ dsn = fmt.Sprintf("host=%s user=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai",
+ database.Host, database.User, database.Name, database.Port, database.SSLMode)
+ }
}
dB, err = gorm.Open(postgres.Open(dsn), gormConfig)
}
diff --git a/internal/bootstrap/patch.go b/internal/bootstrap/patch.go
new file mode 100644
index 00000000..5c7ca758
--- /dev/null
+++ b/internal/bootstrap/patch.go
@@ -0,0 +1,74 @@
+package bootstrap
+
+import (
+ "fmt"
+
+ "github.com/alist-org/alist/v3/internal/bootstrap/patch"
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/pkg/utils"
+ "strings"
+)
+
+var LastLaunchedVersion = ""
+
+func safeCall(v string, i int, f func()) {
+ defer func() {
+ if r := recover(); r != nil {
+ utils.Log.Errorf("Recovered from patch (version: %s, index: %d) panic: %v", v, i, r)
+ }
+ }()
+
+ f()
+}
+
+func getVersion(v string) (major, minor, patchNum int, err error) {
+ _, err = fmt.Sscanf(v, "v%d.%d.%d", &major, &minor, &patchNum)
+ return major, minor, patchNum, err
+}
+
+func compareVersion(majorA, minorA, patchNumA, majorB, minorB, patchNumB int) bool {
+ if majorA != majorB {
+ return majorA > majorB
+ }
+ if minorA != minorB {
+ return minorA > minorB
+ }
+ if patchNumA != patchNumB {
+ return patchNumA > patchNumB
+ }
+ return true
+}
+
+func InitUpgradePatch() {
+ if !strings.HasPrefix(conf.Version, "v") {
+ for _, vp := range patch.UpgradePatches {
+ for i, p := range vp.Patches {
+ safeCall(vp.Version, i, p)
+ }
+ }
+ return
+ }
+ if LastLaunchedVersion == conf.Version {
+ return
+ }
+ if LastLaunchedVersion == "" {
+ LastLaunchedVersion = "v0.0.0"
+ }
+ major, minor, patchNum, err := getVersion(LastLaunchedVersion)
+ if err != nil {
+ utils.Log.Warnf("Failed to parse last launched version %s: %v, skipping all patches and rewrite last launched version", LastLaunchedVersion, err)
+ return
+ }
+ for _, vp := range patch.UpgradePatches {
+ ma, mi, pn, err := getVersion(vp.Version)
+ if err != nil {
+ utils.Log.Errorf("Skip invalid version %s patches: %v", vp.Version, err)
+ continue
+ }
+ if compareVersion(ma, mi, pn, major, minor, patchNum) {
+ for i, p := range vp.Patches {
+ safeCall(vp.Version, i, p)
+ }
+ }
+ }
+}
diff --git a/internal/bootstrap/patch/all.go b/internal/bootstrap/patch/all.go
new file mode 100644
index 00000000..b363d129
--- /dev/null
+++ b/internal/bootstrap/patch/all.go
@@ -0,0 +1,35 @@
+package patch
+
+import (
+ "github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_24_0"
+ "github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_32_0"
+ "github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_41_0"
+)
+
+type VersionPatches struct {
+ // Version means if the system is upgraded from Version or an earlier one
+ // to the current version, all patches in Patches will be executed.
+ Version string
+ Patches []func()
+}
+
+var UpgradePatches = []VersionPatches{
+ {
+ Version: "v3.24.0",
+ Patches: []func(){
+ v3_24_0.HashPwdForOldVersion,
+ },
+ },
+ {
+ Version: "v3.32.0",
+ Patches: []func(){
+ v3_32_0.UpdateAuthnForOldVersion,
+ },
+ },
+ {
+ Version: "v3.41.0",
+ Patches: []func(){
+ v3_41_0.GrantAdminPermissions,
+ },
+ },
+}
diff --git a/internal/bootstrap/patch/v3_24_0/hash_password.go b/internal/bootstrap/patch/v3_24_0/hash_password.go
new file mode 100644
index 00000000..2adb640d
--- /dev/null
+++ b/internal/bootstrap/patch/v3_24_0/hash_password.go
@@ -0,0 +1,26 @@
+package v3_24_0
+
+import (
+ "github.com/alist-org/alist/v3/internal/db"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/pkg/utils"
+)
+
+// HashPwdForOldVersion encode passwords using SHA256
+// First published: 75acbcc perf: sha256 for user's password (close #3552) by Andy Hsu
+func HashPwdForOldVersion() {
+ users, _, err := op.GetUsers(1, -1)
+ if err != nil {
+ utils.Log.Fatalf("[hash pwd for old version] failed get users: %v", err)
+ }
+ for i := range users {
+ user := users[i]
+ if user.PwdHash == "" {
+ user.SetPassword(user.Password)
+ user.Password = ""
+ if err := db.UpdateUser(&user); err != nil {
+ utils.Log.Fatalf("[hash pwd for old version] failed update user: %v", err)
+ }
+ }
+ }
+}
diff --git a/internal/bootstrap/patch/v3_32_0/update_authn.go b/internal/bootstrap/patch/v3_32_0/update_authn.go
new file mode 100644
index 00000000..92a594fd
--- /dev/null
+++ b/internal/bootstrap/patch/v3_32_0/update_authn.go
@@ -0,0 +1,25 @@
+package v3_32_0
+
+import (
+ "github.com/alist-org/alist/v3/internal/db"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/pkg/utils"
+)
+
+// UpdateAuthnForOldVersion updates users' authn
+// First published: bdfc159 fix: webauthn logspam (#6181) by itsHenry
+func UpdateAuthnForOldVersion() {
+ users, _, err := op.GetUsers(1, -1)
+ if err != nil {
+ utils.Log.Fatalf("[update authn for old version] failed get users: %v", err)
+ }
+ for i := range users {
+ user := users[i]
+ if user.Authn == "" {
+ user.Authn = "[]"
+ if err := db.UpdateUser(&user); err != nil {
+ utils.Log.Fatalf("[update authn for old version] failed update user: %v", err)
+ }
+ }
+ }
+}
diff --git a/internal/bootstrap/patch/v3_41_0/grant_permission.go b/internal/bootstrap/patch/v3_41_0/grant_permission.go
new file mode 100644
index 00000000..60d8ab4f
--- /dev/null
+++ b/internal/bootstrap/patch/v3_41_0/grant_permission.go
@@ -0,0 +1,21 @@
+package v3_41_0
+
+import (
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/pkg/utils"
+)
+
+// GrantAdminPermissions gives admin Permission 0(can see hidden) - 9(webdav manage) and
+// 12(can read archives) - 13(can decompress archives)
+// This patch is written to help users upgrading from older version better adapt to PR AlistGo/alist#7705 and
+// PR AlistGo/alist#7817.
+func GrantAdminPermissions() {
+ admin, err := op.GetAdmin()
+ if err == nil && (admin.Permission & 0x33FF) == 0 {
+ admin.Permission |= 0x33FF
+ err = op.UpdateUser(admin)
+ }
+ if err != nil {
+ utils.Log.Errorf("Cannot grant permissions to admin: %v", err)
+ }
+}
diff --git a/internal/bootstrap/stream_limit.go b/internal/bootstrap/stream_limit.go
new file mode 100644
index 00000000..5ece71e4
--- /dev/null
+++ b/internal/bootstrap/stream_limit.go
@@ -0,0 +1,53 @@
+package bootstrap
+
+import (
+ "context"
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/setting"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "golang.org/x/time/rate"
+)
+
+type blockBurstLimiter struct {
+ *rate.Limiter
+}
+
+func (l blockBurstLimiter) WaitN(ctx context.Context, total int) error {
+ for total > 0 {
+ n := l.Burst()
+ if l.Limiter.Limit() == rate.Inf || n > total {
+ n = total
+ }
+ err := l.Limiter.WaitN(ctx, n)
+ if err != nil {
+ return err
+ }
+ total -= n
+ }
+ return nil
+}
+
+func streamFilterNegative(limit int) (rate.Limit, int) {
+ if limit < 0 {
+ return rate.Inf, 0
+ }
+ return rate.Limit(limit) * 1024.0, limit * 1024
+}
+
+func initLimiter(limiter *stream.Limiter, s string) {
+ clientDownLimit, burst := streamFilterNegative(setting.GetInt(s, -1))
+ *limiter = blockBurstLimiter{Limiter: rate.NewLimiter(clientDownLimit, burst)}
+ op.RegisterSettingChangingCallback(func() {
+ newLimit, newBurst := streamFilterNegative(setting.GetInt(s, -1))
+ (*limiter).SetLimit(newLimit)
+ (*limiter).SetBurst(newBurst)
+ })
+}
+
+func InitStreamLimit() {
+ initLimiter(&stream.ClientDownloadLimit, conf.StreamMaxClientDownloadSpeed)
+ initLimiter(&stream.ClientUploadLimit, conf.StreamMaxClientUploadSpeed)
+ initLimiter(&stream.ServerDownloadLimit, conf.StreamMaxServerDownloadSpeed)
+ initLimiter(&stream.ServerUploadLimit, conf.StreamMaxServerUploadSpeed)
+}
diff --git a/internal/bootstrap/task.go b/internal/bootstrap/task.go
index 33902353..c67e3029 100644
--- a/internal/bootstrap/task.go
+++ b/internal/bootstrap/task.go
@@ -5,15 +5,44 @@ import (
"github.com/alist-org/alist/v3/internal/db"
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/offline_download/tool"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/setting"
"github.com/xhofe/tache"
)
+func taskFilterNegative(num int) int64 {
+ if num < 0 {
+ num = 0
+ }
+ return int64(num)
+}
+
func InitTaskManager() {
- fs.UploadTaskManager = tache.NewManager[*fs.UploadTask](tache.WithWorks(conf.Conf.Tasks.Upload.Workers), tache.WithMaxRetry(conf.Conf.Tasks.Upload.MaxRetry)) //upload will not support persist
- fs.CopyTaskManager = tache.NewManager[*fs.CopyTask](tache.WithWorks(conf.Conf.Tasks.Copy.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant), db.UpdateTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Copy.MaxRetry))
- tool.DownloadTaskManager = tache.NewManager[*tool.DownloadTask](tache.WithWorks(conf.Conf.Tasks.Download.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant), db.UpdateTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Download.MaxRetry))
- tool.TransferTaskManager = tache.NewManager[*tool.TransferTask](tache.WithWorks(conf.Conf.Tasks.Transfer.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant), db.UpdateTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Transfer.MaxRetry))
+ fs.UploadTaskManager = tache.NewManager[*fs.UploadTask](tache.WithWorks(setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers)), tache.WithMaxRetry(conf.Conf.Tasks.Upload.MaxRetry)) //upload will not support persist
+ op.RegisterSettingChangingCallback(func() {
+ fs.UploadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers)))
+ })
+ fs.CopyTaskManager = tache.NewManager[*fs.CopyTask](tache.WithWorks(setting.GetInt(conf.TaskCopyThreadsNum, conf.Conf.Tasks.Copy.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant), db.UpdateTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Copy.MaxRetry))
+ op.RegisterSettingChangingCallback(func() {
+ fs.CopyTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskCopyThreadsNum, conf.Conf.Tasks.Copy.Workers)))
+ })
+ tool.DownloadTaskManager = tache.NewManager[*tool.DownloadTask](tache.WithWorks(setting.GetInt(conf.TaskOfflineDownloadThreadsNum, conf.Conf.Tasks.Download.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant), db.UpdateTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Download.MaxRetry))
+ op.RegisterSettingChangingCallback(func() {
+ tool.DownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskOfflineDownloadThreadsNum, conf.Conf.Tasks.Download.Workers)))
+ })
+ tool.TransferTaskManager = tache.NewManager[*tool.TransferTask](tache.WithWorks(setting.GetInt(conf.TaskOfflineDownloadTransferThreadsNum, conf.Conf.Tasks.Transfer.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant), db.UpdateTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Transfer.MaxRetry))
+ op.RegisterSettingChangingCallback(func() {
+ tool.TransferTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskOfflineDownloadTransferThreadsNum, conf.Conf.Tasks.Transfer.Workers)))
+ })
if len(tool.TransferTaskManager.GetAll()) == 0 { //prevent offline downloaded files from being deleted
CleanTempDir()
}
+ fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](tache.WithWorks(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant), db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry))
+ op.RegisterSettingChangingCallback(func() {
+ fs.ArchiveDownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)))
+ })
+ fs.ArchiveContentUploadTaskManager.Manager = tache.NewManager[*fs.ArchiveContentUploadTask](tache.WithWorks(setting.GetInt(conf.TaskDecompressUploadThreadsNum, conf.Conf.Tasks.DecompressUpload.Workers)), tache.WithMaxRetry(conf.Conf.Tasks.DecompressUpload.MaxRetry)) //decompress upload will not support persist
+ op.RegisterSettingChangingCallback(func() {
+ fs.ArchiveContentUploadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressUploadThreadsNum, conf.Conf.Tasks.DecompressUpload.Workers)))
+ })
}
diff --git a/internal/conf/config.go b/internal/conf/config.go
index c5dc9c52..cdb86fee 100644
--- a/internal/conf/config.go
+++ b/internal/conf/config.go
@@ -35,6 +35,7 @@ type Scheme struct {
KeyFile string `json:"key_file" env:"KEY_FILE"`
UnixFile string `json:"unix_file" env:"UNIX_FILE"`
UnixFilePerm string `json:"unix_file_perm" env:"UNIX_FILE_PERM"`
+ EnableH2c bool `json:"enable_h2c" env:"ENABLE_H2C"`
}
type LogConfig struct {
@@ -53,10 +54,13 @@ type TaskConfig struct {
}
type TasksConfig struct {
- Download TaskConfig `json:"download" envPrefix:"DOWNLOAD_"`
- Transfer TaskConfig `json:"transfer" envPrefix:"TRANSFER_"`
- Upload TaskConfig `json:"upload" envPrefix:"UPLOAD_"`
- Copy TaskConfig `json:"copy" envPrefix:"COPY_"`
+ Download TaskConfig `json:"download" envPrefix:"DOWNLOAD_"`
+ Transfer TaskConfig `json:"transfer" envPrefix:"TRANSFER_"`
+ Upload TaskConfig `json:"upload" envPrefix:"UPLOAD_"`
+ Copy TaskConfig `json:"copy" envPrefix:"COPY_"`
+ Decompress TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"`
+ DecompressUpload TaskConfig `json:"decompress_upload" envPrefix:"DECOMPRESS_UPLOAD_"`
+ AllowRetryCanceled bool `json:"allow_retry_canceled" env:"ALLOW_RETRY_CANCELED"`
}
type Cors struct {
@@ -71,6 +75,24 @@ type S3 struct {
SSL bool `json:"ssl" env:"SSL"`
}
+type FTP struct {
+ Enable bool `json:"enable" env:"ENABLE"`
+ Listen string `json:"listen" env:"LISTEN"`
+ FindPasvPortAttempts int `json:"find_pasv_port_attempts" env:"FIND_PASV_PORT_ATTEMPTS"`
+ ActiveTransferPortNon20 bool `json:"active_transfer_port_non_20" env:"ACTIVE_TRANSFER_PORT_NON_20"`
+ IdleTimeout int `json:"idle_timeout" env:"IDLE_TIMEOUT"`
+ ConnectionTimeout int `json:"connection_timeout" env:"CONNECTION_TIMEOUT"`
+ DisableActiveMode bool `json:"disable_active_mode" env:"DISABLE_ACTIVE_MODE"`
+ DefaultTransferBinary bool `json:"default_transfer_binary" env:"DEFAULT_TRANSFER_BINARY"`
+ EnableActiveConnIPCheck bool `json:"enable_active_conn_ip_check" env:"ENABLE_ACTIVE_CONN_IP_CHECK"`
+ EnablePasvConnIPCheck bool `json:"enable_pasv_conn_ip_check" env:"ENABLE_PASV_CONN_IP_CHECK"`
+}
+
+type SFTP struct {
+ Enable bool `json:"enable" env:"ENABLE"`
+ Listen string `json:"listen" env:"LISTEN"`
+}
+
type Config struct {
Force bool `json:"force" env:"FORCE"`
SiteURL string `json:"site_url" env:"SITE_URL"`
@@ -86,10 +108,14 @@ type Config struct {
Log LogConfig `json:"log"`
DelayedStart int `json:"delayed_start" env:"DELAYED_START"`
MaxConnections int `json:"max_connections" env:"MAX_CONNECTIONS"`
+ MaxConcurrency int `json:"max_concurrency" env:"MAX_CONCURRENCY"`
TlsInsecureSkipVerify bool `json:"tls_insecure_skip_verify" env:"TLS_INSECURE_SKIP_VERIFY"`
Tasks TasksConfig `json:"tasks" envPrefix:"TASKS_"`
Cors Cors `json:"cors" envPrefix:"CORS_"`
S3 S3 `json:"s3" envPrefix:"S3_"`
+ FTP FTP `json:"ftp" envPrefix:"FTP_"`
+ SFTP SFTP `json:"sftp" envPrefix:"SFTP_"`
+ LastLaunchedVersion string `json:"last_launched_version"`
}
func DefaultConfig() *Config {
@@ -128,26 +154,37 @@ func DefaultConfig() *Config {
MaxAge: 28,
},
MaxConnections: 0,
+ MaxConcurrency: 64,
TlsInsecureSkipVerify: true,
Tasks: TasksConfig{
Download: TaskConfig{
- Workers: 5,
- MaxRetry: 1,
- TaskPersistant: true,
+ Workers: 5,
+ MaxRetry: 1,
+ // TaskPersistant: true,
},
Transfer: TaskConfig{
- Workers: 5,
- MaxRetry: 2,
- TaskPersistant: true,
+ Workers: 5,
+ MaxRetry: 2,
+ // TaskPersistant: true,
},
Upload: TaskConfig{
Workers: 5,
},
Copy: TaskConfig{
- Workers: 5,
- MaxRetry: 2,
- TaskPersistant: true,
+ Workers: 5,
+ MaxRetry: 2,
+ // TaskPersistant: true,
},
+ Decompress: TaskConfig{
+ Workers: 5,
+ MaxRetry: 2,
+ // TaskPersistant: true,
+ },
+ DecompressUpload: TaskConfig{
+ Workers: 5,
+ MaxRetry: 2,
+ },
+ AllowRetryCanceled: false,
},
Cors: Cors{
AllowOrigins: []string{"*"},
@@ -159,5 +196,22 @@ func DefaultConfig() *Config {
Port: 5246,
SSL: false,
},
+ FTP: FTP{
+ Enable: false,
+ Listen: ":5221",
+ FindPasvPortAttempts: 50,
+ ActiveTransferPortNon20: false,
+ IdleTimeout: 900,
+ ConnectionTimeout: 30,
+ DisableActiveMode: false,
+ DefaultTransferBinary: false,
+ EnableActiveConnIPCheck: true,
+ EnablePasvConnIPCheck: true,
+ },
+ SFTP: SFTP{
+ Enable: false,
+ Listen: ":5222",
+ },
+ LastLaunchedVersion: "",
}
}
diff --git a/internal/conf/const.go b/internal/conf/const.go
index 2d53702e..5cb8d850 100644
--- a/internal/conf/const.go
+++ b/internal/conf/const.go
@@ -22,15 +22,17 @@ const (
MainColor = "main_color"
// preview
- TextTypes = "text_types"
- AudioTypes = "audio_types"
- VideoTypes = "video_types"
- ImageTypes = "image_types"
- ProxyTypes = "proxy_types"
- ProxyIgnoreHeaders = "proxy_ignore_headers"
- AudioAutoplay = "audio_autoplay"
- VideoAutoplay = "video_autoplay"
-
+ TextTypes = "text_types"
+ AudioTypes = "audio_types"
+ VideoTypes = "video_types"
+ ImageTypes = "image_types"
+ ProxyTypes = "proxy_types"
+ ProxyIgnoreHeaders = "proxy_ignore_headers"
+ AudioAutoplay = "audio_autoplay"
+ VideoAutoplay = "video_autoplay"
+ PreviewArchivesByDefault = "preview_archives_by_default"
+ ReadMeAutoRender = "readme_autorender"
+ FilterReadMeScripts = "filter_readme_scripts"
// global
HideFiles = "hide_files"
CustomizeHead = "customize_head"
@@ -54,11 +56,24 @@ const (
Aria2Uri = "aria2_uri"
Aria2Secret = "aria2_secret"
+ // transmission
+ TransmissionUri = "transmission_uri"
+ TransmissionSeedtime = "transmission_seedtime"
+
+ // 115
+ Pan115TempDir = "115_temp_dir"
+
+ // pikpak
+ PikPakTempDir = "pikpak_temp_dir"
+
+ // thunder
+ ThunderTempDir = "thunder_temp_dir"
+
// single
Token = "token"
IndexProgress = "index_progress"
- //SSO
+ // SSO
SSOClientId = "sso_client_id"
SSOClientSecret = "sso_client_secret"
SSOLoginEnabled = "sso_login_enabled"
@@ -68,12 +83,13 @@ const (
SSOApplicationName = "sso_application_name"
SSOEndpointName = "sso_endpoint_name"
SSOJwtPublicKey = "sso_jwt_public_key"
+ SSOExtraScopes = "sso_extra_scopes"
SSOAutoRegister = "sso_auto_register"
SSODefaultDir = "sso_default_dir"
SSODefaultPermission = "sso_default_permission"
SSOCompatibilityMode = "sso_compatibility_mode"
- //ldap
+ // ldap
LdapLoginEnabled = "ldap_login_enabled"
LdapServer = "ldap_server"
LdapManagerDN = "ldap_manager_dn"
@@ -84,7 +100,7 @@ const (
LdapDefaultDir = "ldap_default_dir"
LdapLoginTips = "ldap_login_tips"
- //s3
+ // s3
S3Buckets = "s3_buckets"
S3AccessKeyId = "s3_access_key_id"
S3SecretAccessKey = "s3_secret_access_key"
@@ -92,12 +108,33 @@ const (
// qbittorrent
QbittorrentUrl = "qbittorrent_url"
QbittorrentSeedtime = "qbittorrent_seedtime"
+
+ // ftp
+ FTPPublicHost = "ftp_public_host"
+ FTPPasvPortMap = "ftp_pasv_port_map"
+ FTPProxyUserAgent = "ftp_proxy_user_agent"
+ FTPMandatoryTLS = "ftp_mandatory_tls"
+ FTPImplicitTLS = "ftp_implicit_tls"
+ FTPTLSPrivateKeyPath = "ftp_tls_private_key_path"
+ FTPTLSPublicCertPath = "ftp_tls_public_cert_path"
+
+ // traffic
+ TaskOfflineDownloadThreadsNum = "offline_download_task_threads_num"
+ TaskOfflineDownloadTransferThreadsNum = "offline_download_transfer_task_threads_num"
+ TaskUploadThreadsNum = "upload_task_threads_num"
+ TaskCopyThreadsNum = "copy_task_threads_num"
+ TaskDecompressDownloadThreadsNum = "decompress_download_task_threads_num"
+ TaskDecompressUploadThreadsNum = "decompress_upload_task_threads_num"
+ StreamMaxClientDownloadSpeed = "max_client_download_speed"
+ StreamMaxClientUploadSpeed = "max_client_upload_speed"
+ StreamMaxServerDownloadSpeed = "max_server_download_speed"
+ StreamMaxServerUploadSpeed = "max_server_upload_speed"
)
const (
UNKNOWN = iota
FOLDER
- //OFFICE
+ // OFFICE
VIDEO
AUDIO
TEXT
diff --git a/internal/conf/var.go b/internal/conf/var.go
index 0a8eb16f..7ae1a5ab 100644
--- a/internal/conf/var.go
+++ b/internal/conf/var.go
@@ -7,7 +7,6 @@ import (
var (
BuiltAt string
- GoVersion string
GitAuthor string
GitCommit string
Version string = "dev"
diff --git a/internal/db/db.go b/internal/db/db.go
index 2df58d37..2cd18050 100644
--- a/internal/db/db.go
+++ b/internal/db/db.go
@@ -12,7 +12,7 @@ var db *gorm.DB
func Init(d *gorm.DB) {
db = d
- err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem))
+ err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey))
if err != nil {
log.Fatalf("failed migrate database: %s", err.Error())
}
diff --git a/internal/db/sshkey.go b/internal/db/sshkey.go
new file mode 100644
index 00000000..f51dbfdc
--- /dev/null
+++ b/internal/db/sshkey.go
@@ -0,0 +1,57 @@
+package db
+
+import (
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/pkg/errors"
+)
+
+func GetSSHPublicKeyByUserId(userId uint, pageIndex, pageSize int) (keys []model.SSHPublicKey, count int64, err error) {
+ keyDB := db.Model(&model.SSHPublicKey{})
+ query := model.SSHPublicKey{UserId: userId}
+ if err := keyDB.Where(query).Count(&count).Error; err != nil {
+ return nil, 0, errors.Wrapf(err, "failed get user's keys count")
+ }
+ if err := keyDB.Where(query).Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&keys).Error; err != nil {
+ return nil, 0, errors.Wrapf(err, "failed get find user's keys")
+ }
+ return keys, count, nil
+}
+
+func GetSSHPublicKeyById(id uint) (*model.SSHPublicKey, error) {
+ var k model.SSHPublicKey
+ if err := db.First(&k, id).Error; err != nil {
+ return nil, errors.Wrapf(err, "failed get old key")
+ }
+ return &k, nil
+}
+
+func GetSSHPublicKeyByUserTitle(userId uint, title string) (*model.SSHPublicKey, error) {
+ key := model.SSHPublicKey{UserId: userId, Title: title}
+ if err := db.Where(key).First(&key).Error; err != nil {
+ return nil, errors.Wrapf(err, "failed find key with title of user")
+ }
+ return &key, nil
+}
+
+func CreateSSHPublicKey(k *model.SSHPublicKey) error {
+ return errors.WithStack(db.Create(k).Error)
+}
+
+func UpdateSSHPublicKey(k *model.SSHPublicKey) error {
+ return errors.WithStack(db.Save(k).Error)
+}
+
+func GetSSHPublicKeys(pageIndex, pageSize int) (keys []model.SSHPublicKey, count int64, err error) {
+ keyDB := db.Model(&model.SSHPublicKey{})
+ if err := keyDB.Count(&count).Error; err != nil {
+ return nil, 0, errors.Wrapf(err, "failed get keys count")
+ }
+ if err := keyDB.Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&keys).Error; err != nil {
+ return nil, 0, errors.Wrapf(err, "failed get find keys")
+ }
+ return keys, count, nil
+}
+
+func DeleteSSHPublicKeyById(id uint) error {
+ return errors.WithStack(db.Delete(&model.SSHPublicKey{}, id).Error)
+}
diff --git a/internal/driver/driver.go b/internal/driver/driver.go
index 781e8532..9e9440b6 100644
--- a/internal/driver/driver.go
+++ b/internal/driver/driver.go
@@ -77,7 +77,37 @@ type Remove interface {
}
type Put interface {
- Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) error
+ // Put a file (provided as a FileStreamer) into the driver
+ // Besides the most basic upload functionality, the following features also need to be implemented:
+ // 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
+ // (1) Use request methods that carry context, such as the following:
+ // a. http.NewRequestWithContext
+ // b. resty.Request.SetContext
+ // c. s3manager.Uploader.UploadWithContext
+ // d. utils.CopyWithCtx
+ // (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
+ // (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process,
+ // this is typically applicable to chunked uploads.
+ // 2. Submit upload progress (via `up`) in real-time. There are three recommended ways as follows:
+ // (1) Use `utils.CopyWithCtx`
+ // (2) Use `driver.ReaderUpdatingProgress`
+ // (3) Use `driver.Progress` with `io.TeeReader`
+ // 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream
+ // in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
+ // before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN`
+ // if your file chunks are sufficiently small (less than about 50KB).
+ // NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
+ // you use a `errgroup.Group` to upload each chunk in parallel, you should consider using a recursive
+ // mutex like `semaphore.Weighted` to limit the maximum number of upload threads, preventing excessive
+ // memory usage caused by buffering too many file chunks awaiting upload.
+ Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) error
+}
+
+type PutURL interface {
+	// PutURL directly puts a URL into the storage
+	// Applicable to index-based drivers like URL-Tree or drivers that support uploading files as URLs
+	// Called when using SimpleHttp for offline downloading, skipping the creation of a download task
+ PutURL(ctx context.Context, dstDir model.Obj, name, url string) error
}
//type WriteResult interface {
@@ -106,27 +136,75 @@ type CopyResult interface {
}
type PutResult interface {
- Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) (model.Obj, error)
+ // Put a file (provided as a FileStreamer) into the driver and return the put obj
+ // Besides the most basic upload functionality, the following features also need to be implemented:
+ // 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
+ // (1) Use request methods that carry context, such as the following:
+ // a. http.NewRequestWithContext
+ // b. resty.Request.SetContext
+ // c. s3manager.Uploader.UploadWithContext
+ // d. utils.CopyWithCtx
+ // (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
+ // (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process,
+ // this is typically applicable to chunked uploads.
+ // 2. Submit upload progress (via `up`) in real-time. There are three recommended ways as follows:
+ // (1) Use `utils.CopyWithCtx`
+ // (2) Use `driver.ReaderUpdatingProgress`
+ // (3) Use `driver.Progress` with `io.TeeReader`
+ // 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream
+ // in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
+ // before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN`
+ // if your file chunks are sufficiently small (less than about 50KB).
+ // NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
+ // you use a `errgroup.Group` to upload each chunk in parallel, you should consider using a recursive
+ // mutex like `semaphore.Weighted` to limit the maximum number of upload threads, preventing excessive
+ // memory usage caused by buffering too many file chunks awaiting upload.
+ Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) (model.Obj, error)
}
-type UpdateProgress func(percentage float64)
-
-type Progress struct {
- Total int64
- Done int64
- up UpdateProgress
+type PutURLResult interface {
+	// PutURL directly puts a URL into the storage
+	// Applicable to index-based drivers like URL-Tree or drivers that support uploading files as URLs
+	// Called when using SimpleHttp for offline downloading, skipping the creation of a download task
+ PutURL(ctx context.Context, dstDir model.Obj, name, url string) (model.Obj, error)
}
-func (p *Progress) Write(b []byte) (n int, err error) {
- n = len(b)
- p.Done += int64(n)
- p.up(float64(p.Done) / float64(p.Total) * 100)
- return
+type ArchiveReader interface {
+ // GetArchiveMeta get the meta-info of an archive
+	// return errs.WrongArchivePassword if the meta-info is also encrypted but the provided password is wrong or empty
+ // return errs.NotImplement to use internal archive tools to get the meta-info, such as the following cases:
+	// 1. the driver does not support the format of the archive, but an internal tool may
+ // 2. handling archives is a VIP feature, but the driver does not have VIP access
+ GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error)
+ // ListArchive list the children of model.ArchiveArgs.InnerPath in the archive
+ // return errs.NotImplement to use internal archive tools to list the children
+ // return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree
+ ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error)
+ // Extract get url/filepath/reader of a file in the archive
+ // return errs.NotImplement to use internal archive tools to extract
+ Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error)
}
-func NewProgress(total int64, up UpdateProgress) *Progress {
- return &Progress{
- Total: total,
- up: up,
- }
+type ArchiveGetter interface {
+	// ArchiveGet gets a file by its inner path
+ // return errs.NotImplement to use internal archive tools to get the children
+ // return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree
+ ArchiveGet(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (model.Obj, error)
+}
+
+type ArchiveDecompress interface {
+ ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error
+}
+
+type ArchiveDecompressResult interface {
+ // ArchiveDecompress decompress an archive
+	// when args.PutIntoNewDir, the new sub-folder should be named the same as the archive but without the extension
+ // return each decompressed obj from the root path of the archive when args.PutIntoNewDir is false
+ // return only the newly created folder when args.PutIntoNewDir is true
+ // return errs.NotImplement to use internal archive tools to decompress
+ ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
+}
+
+type Reference interface {
+ InitReference(storage Driver) error
}
diff --git a/internal/driver/utils.go b/internal/driver/utils.go
new file mode 100644
index 00000000..2af850ec
--- /dev/null
+++ b/internal/driver/utils.go
@@ -0,0 +1,62 @@
+package driver
+
+import (
+ "context"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "io"
+)
+
+type UpdateProgress = model.UpdateProgress
+
+type Progress struct {
+ Total int64
+ Done int64
+ up UpdateProgress
+}
+
+func (p *Progress) Write(b []byte) (n int, err error) {
+ n = len(b)
+ p.Done += int64(n)
+ p.up(float64(p.Done) / float64(p.Total) * 100)
+ return
+}
+
+func NewProgress(total int64, up UpdateProgress) *Progress {
+ return &Progress{
+ Total: total,
+ up: up,
+ }
+}
+
+type RateLimitReader = stream.RateLimitReader
+
+type RateLimitWriter = stream.RateLimitWriter
+
+type RateLimitFile = stream.RateLimitFile
+
+func NewLimitedUploadStream(ctx context.Context, r io.Reader) *RateLimitReader {
+ return &RateLimitReader{
+ Reader: r,
+ Limiter: stream.ServerUploadLimit,
+ Ctx: ctx,
+ }
+}
+
+func NewLimitedUploadFile(ctx context.Context, f model.File) *RateLimitFile {
+ return &RateLimitFile{
+ File: f,
+ Limiter: stream.ServerUploadLimit,
+ Ctx: ctx,
+ }
+}
+
+func ServerUploadLimitWaitN(ctx context.Context, n int) error {
+ return stream.ServerUploadLimit.WaitN(ctx, n)
+}
+
+type ReaderWithCtx = stream.ReaderWithCtx
+
+type ReaderUpdatingProgress = stream.ReaderUpdatingProgress
+
+type SimpleReaderWithSize = stream.SimpleReaderWithSize
diff --git a/internal/errs/errors.go b/internal/errs/errors.go
index ecfe43e3..2a22dca1 100644
--- a/internal/errs/errors.go
+++ b/internal/errs/errors.go
@@ -19,6 +19,10 @@ var (
StorageNotFound = errors.New("storage not found")
StreamIncomplete = errors.New("upload/download stream incomplete, possible network issue")
StreamPeekFail = errors.New("StreamPeekFail")
+
+ UnknownArchiveFormat = errors.New("unknown archive format")
+ WrongArchivePassword = errors.New("wrong archive password")
+ DriverExtractNotSupported = errors.New("driver extraction not supported")
)
// NewErr wrap constant error with an extra message
diff --git a/internal/fs/archive.go b/internal/fs/archive.go
new file mode 100644
index 00000000..dbae9b33
--- /dev/null
+++ b/internal/fs/archive.go
@@ -0,0 +1,400 @@
+package fs
+
+import (
+ "context"
+ stderrors "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "mime"
+ "net/http"
+ "os"
+ stdpath "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/driver"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/alist-org/alist/v3/internal/task"
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+ "github.com/xhofe/tache"
+)
+
+type ArchiveDownloadTask struct {
+ task.TaskExtension
+ model.ArchiveDecompressArgs
+ status string
+ SrcObjPath string
+ DstDirPath string
+ srcStorage driver.Driver
+ dstStorage driver.Driver
+ SrcStorageMp string
+ DstStorageMp string
+}
+
+func (t *ArchiveDownloadTask) GetName() string {
+ return fmt.Sprintf("decompress [%s](%s)[%s] to [%s](%s) with password <%s>", t.SrcStorageMp, t.SrcObjPath,
+ t.InnerPath, t.DstStorageMp, t.DstDirPath, t.Password)
+}
+
+func (t *ArchiveDownloadTask) GetStatus() string {
+ return t.status
+}
+
+func (t *ArchiveDownloadTask) Run() error {
+ t.ReinitCtx()
+ t.ClearEndTime()
+ t.SetStartTime(time.Now())
+ defer func() { t.SetEndTime(time.Now()) }()
+ uploadTask, err := t.RunWithoutPushUploadTask()
+ if err != nil {
+ return err
+ }
+ ArchiveContentUploadTaskManager.Add(uploadTask)
+ return nil
+}
+
+func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadTask, error) {
+ var err error
+ if t.srcStorage == nil {
+ t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp)
+ }
+ srcObj, tool, ss, err := op.GetArchiveToolAndStream(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{
+ Header: http.Header{},
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ var e error
+ for _, s := range ss {
+ e = stderrors.Join(e, s.Close())
+ }
+ if e != nil {
+ log.Errorf("failed to close file streamer, %v", e)
+ }
+ }()
+ var decompressUp model.UpdateProgress
+ if t.CacheFull {
+ var total, cur int64 = 0, 0
+ for _, s := range ss {
+ total += s.GetSize()
+ }
+ t.SetTotalBytes(total)
+ t.status = "getting src object"
+ for _, s := range ss {
+ if s.GetFile() == nil {
+ _, err = stream.CacheFullInTempFileAndUpdateProgress(s, func(p float64) {
+ t.SetProgress((float64(cur) + float64(s.GetSize())*p/100.0) / float64(total))
+ })
+ }
+ cur += s.GetSize()
+ if err != nil {
+ return nil, err
+ }
+ }
+ t.SetProgress(100.0)
+ decompressUp = func(_ float64) {}
+ } else {
+ decompressUp = t.SetProgress
+ }
+ t.status = "walking and decompressing"
+ dir, err := os.MkdirTemp(conf.Conf.TempDir, "dir-*")
+ if err != nil {
+ return nil, err
+ }
+ err = tool.Decompress(ss, dir, t.ArchiveInnerArgs, decompressUp)
+ if err != nil {
+ return nil, err
+ }
+ baseName := strings.TrimSuffix(srcObj.GetName(), stdpath.Ext(srcObj.GetName()))
+ uploadTask := &ArchiveContentUploadTask{
+ TaskExtension: task.TaskExtension{
+ Creator: t.GetCreator(),
+ },
+ ObjName: baseName,
+ InPlace: !t.PutIntoNewDir,
+ FilePath: dir,
+ DstDirPath: t.DstDirPath,
+ dstStorage: t.dstStorage,
+ DstStorageMp: t.DstStorageMp,
+ }
+ return uploadTask, nil
+}
+
+var ArchiveDownloadTaskManager *tache.Manager[*ArchiveDownloadTask]
+
+type ArchiveContentUploadTask struct {
+ task.TaskExtension
+ status string
+ ObjName string
+ InPlace bool
+ FilePath string
+ DstDirPath string
+ dstStorage driver.Driver
+ DstStorageMp string
+ finalized bool
+}
+
+func (t *ArchiveContentUploadTask) GetName() string {
+ return fmt.Sprintf("upload %s to [%s](%s)", t.ObjName, t.DstStorageMp, t.DstDirPath)
+}
+
+func (t *ArchiveContentUploadTask) GetStatus() string {
+ return t.status
+}
+
+func (t *ArchiveContentUploadTask) Run() error {
+ t.ReinitCtx()
+ t.ClearEndTime()
+ t.SetStartTime(time.Now())
+ defer func() { t.SetEndTime(time.Now()) }()
+ return t.RunWithNextTaskCallback(func(nextTsk *ArchiveContentUploadTask) error {
+ ArchiveContentUploadTaskManager.Add(nextTsk)
+ return nil
+ })
+}
+
+func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTsk *ArchiveContentUploadTask) error) error {
+ var err error
+ if t.dstStorage == nil {
+ t.dstStorage, err = op.GetStorageByMountPath(t.DstStorageMp)
+ }
+ info, err := os.Stat(t.FilePath)
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ t.status = "src object is dir, listing objs"
+ nextDstPath := t.DstDirPath
+ if !t.InPlace {
+ nextDstPath = stdpath.Join(nextDstPath, t.ObjName)
+ err = op.MakeDir(t.Ctx(), t.dstStorage, nextDstPath)
+ if err != nil {
+ return err
+ }
+ }
+ entries, err := os.ReadDir(t.FilePath)
+ if err != nil {
+ return err
+ }
+ var es error
+ for _, entry := range entries {
+ var nextFilePath string
+ if entry.IsDir() {
+ nextFilePath, err = moveToTempPath(stdpath.Join(t.FilePath, entry.Name()), "dir-")
+ } else {
+ nextFilePath, err = moveToTempPath(stdpath.Join(t.FilePath, entry.Name()), "file-")
+ }
+ if err != nil {
+ es = stderrors.Join(es, err)
+ continue
+ }
+ err = f(&ArchiveContentUploadTask{
+ TaskExtension: task.TaskExtension{
+ Creator: t.GetCreator(),
+ },
+ ObjName: entry.Name(),
+ InPlace: false,
+ FilePath: nextFilePath,
+ DstDirPath: nextDstPath,
+ dstStorage: t.dstStorage,
+ DstStorageMp: t.DstStorageMp,
+ })
+ if err != nil {
+ es = stderrors.Join(es, err)
+ }
+ }
+ if es != nil {
+ return es
+ }
+ } else {
+ t.SetTotalBytes(info.Size())
+ file, err := os.Open(t.FilePath)
+ if err != nil {
+ return err
+ }
+ fs := &stream.FileStream{
+ Obj: &model.Object{
+ Name: t.ObjName,
+ Size: info.Size(),
+ Modified: time.Now(),
+ },
+ Mimetype: mime.TypeByExtension(filepath.Ext(t.ObjName)),
+ WebPutAsTask: true,
+ Reader: file,
+ }
+ fs.Closers.Add(file)
+ t.status = "uploading"
+ err = op.Put(t.Ctx(), t.dstStorage, t.DstDirPath, fs, t.SetProgress, true)
+ if err != nil {
+ return err
+ }
+ }
+ t.deleteSrcFile()
+ return nil
+}
+
+func (t *ArchiveContentUploadTask) Cancel() {
+ t.TaskExtension.Cancel()
+ if !conf.Conf.Tasks.AllowRetryCanceled {
+ t.deleteSrcFile()
+ }
+}
+
+func (t *ArchiveContentUploadTask) deleteSrcFile() {
+ if !t.finalized {
+ _ = os.RemoveAll(t.FilePath)
+ t.finalized = true
+ }
+}
+
+func moveToTempPath(path, prefix string) (string, error) {
+ newPath, err := genTempFileName(prefix)
+ if err != nil {
+ return "", err
+ }
+ err = os.Rename(path, newPath)
+ if err != nil {
+ return "", err
+ }
+ return newPath, nil
+}
+
+func genTempFileName(prefix string) (string, error) {
+ retry := 0
+ for retry < 10000 {
+ newPath := stdpath.Join(conf.Conf.TempDir, prefix+strconv.FormatUint(uint64(rand.Uint32()), 10))
+ if _, err := os.Stat(newPath); err != nil {
+ if os.IsNotExist(err) {
+ return newPath, nil
+ } else {
+ return "", err
+ }
+ }
+ retry++
+ }
+ return "", errors.New("failed to generate temp-file name: too many retries")
+}
+
+type archiveContentUploadTaskManagerType struct {
+ *tache.Manager[*ArchiveContentUploadTask]
+}
+
+func (m *archiveContentUploadTaskManagerType) Remove(id string) {
+ if t, ok := m.GetByID(id); ok {
+ t.deleteSrcFile()
+ m.Manager.Remove(id)
+ }
+}
+
+func (m *archiveContentUploadTaskManagerType) RemoveAll() {
+ tasks := m.GetAll()
+ for _, t := range tasks {
+ m.Remove(t.GetID())
+ }
+}
+
+func (m *archiveContentUploadTaskManagerType) RemoveByState(state ...tache.State) {
+ tasks := m.GetByState(state...)
+ for _, t := range tasks {
+ m.Remove(t.GetID())
+ }
+}
+
+func (m *archiveContentUploadTaskManagerType) RemoveByCondition(condition func(task *ArchiveContentUploadTask) bool) {
+ tasks := m.GetByCondition(condition)
+ for _, t := range tasks {
+ m.Remove(t.GetID())
+ }
+}
+
+var ArchiveContentUploadTaskManager = &archiveContentUploadTaskManagerType{
+ Manager: nil,
+}
+
+func archiveMeta(ctx context.Context, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
+ storage, actualPath, err := op.GetStorageAndActualPath(path)
+ if err != nil {
+ return nil, errors.WithMessage(err, "failed get storage")
+ }
+ return op.GetArchiveMeta(ctx, storage, actualPath, args)
+}
+
+func archiveList(ctx context.Context, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
+ storage, actualPath, err := op.GetStorageAndActualPath(path)
+ if err != nil {
+ return nil, errors.WithMessage(err, "failed get storage")
+ }
+ return op.ListArchive(ctx, storage, actualPath, args)
+}
+
+func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) (task.TaskExtensionInfo, error) {
+ srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(srcObjPath)
+ if err != nil {
+ return nil, errors.WithMessage(err, "failed get src storage")
+ }
+ dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
+ if err != nil {
+ return nil, errors.WithMessage(err, "failed get dst storage")
+ }
+ if srcStorage.GetStorage() == dstStorage.GetStorage() {
+ err = op.ArchiveDecompress(ctx, srcStorage, srcObjActualPath, dstDirActualPath, args, lazyCache...)
+ if !errors.Is(err, errs.NotImplement) {
+ return nil, err
+ }
+ }
+ taskCreator, _ := ctx.Value("user").(*model.User)
+ tsk := &ArchiveDownloadTask{
+ TaskExtension: task.TaskExtension{
+ Creator: taskCreator,
+ },
+ ArchiveDecompressArgs: args,
+ srcStorage: srcStorage,
+ dstStorage: dstStorage,
+ SrcObjPath: srcObjActualPath,
+ DstDirPath: dstDirActualPath,
+ SrcStorageMp: srcStorage.GetStorage().MountPath,
+ DstStorageMp: dstStorage.GetStorage().MountPath,
+ }
+ if ctx.Value(conf.NoTaskKey) != nil {
+ uploadTask, err := tsk.RunWithoutPushUploadTask()
+ if err != nil {
+ return nil, errors.WithMessagef(err, "failed download [%s]", srcObjPath)
+ }
+ defer uploadTask.deleteSrcFile()
+ var callback func(t *ArchiveContentUploadTask) error
+ callback = func(t *ArchiveContentUploadTask) error {
+ e := t.RunWithNextTaskCallback(callback)
+ t.deleteSrcFile()
+ return e
+ }
+ return nil, uploadTask.RunWithNextTaskCallback(callback)
+ } else {
+ ArchiveDownloadTaskManager.Add(tsk)
+ return tsk, nil
+ }
+}
+
+func archiveDriverExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
+ storage, actualPath, err := op.GetStorageAndActualPath(path)
+ if err != nil {
+ return nil, nil, errors.WithMessage(err, "failed get storage")
+ }
+ return op.DriverExtract(ctx, storage, actualPath, args)
+}
+
+func archiveInternalExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+ storage, actualPath, err := op.GetStorageAndActualPath(path)
+ if err != nil {
+ return nil, 0, errors.WithMessage(err, "failed get storage")
+ }
+ return op.InternalExtract(ctx, storage, actualPath, args)
+}
diff --git a/internal/fs/copy.go b/internal/fs/copy.go
index 38407c9a..155e3cf7 100644
--- a/internal/fs/copy.go
+++ b/internal/fs/copy.go
@@ -3,21 +3,24 @@ package fs
import (
"context"
"fmt"
+ "github.com/alist-org/alist/v3/internal/errs"
"net/http"
stdpath "path"
+ "time"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/stream"
+ "github.com/alist-org/alist/v3/internal/task"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
"github.com/xhofe/tache"
)
type CopyTask struct {
- tache.Base
+ task.TaskExtension
Status string `json:"-"` //don't save status to save space
SrcObjPath string `json:"src_path"`
DstDirPath string `json:"dst_path"`
@@ -36,6 +39,10 @@ func (t *CopyTask) GetStatus() string {
}
func (t *CopyTask) Run() error {
+ t.ReinitCtx()
+ t.ClearEndTime()
+ t.SetStartTime(time.Now())
+ defer func() { t.SetEndTime(time.Now()) }()
var err error
if t.srcStorage == nil {
t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp)
@@ -53,7 +60,7 @@ var CopyTaskManager *tache.Manager[*CopyTask]
// Copy if in the same storage, call move method
// if not, add copy task
-func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (tache.TaskWithInfo, error) {
+func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskExtensionInfo, error) {
srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(srcObjPath)
if err != nil {
return nil, errors.WithMessage(err, "failed get src storage")
@@ -64,7 +71,10 @@ func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool
}
// copy if in the same storage, just call driver.Copy
if srcStorage.GetStorage() == dstStorage.GetStorage() {
- return nil, op.Copy(ctx, srcStorage, srcObjActualPath, dstDirActualPath, lazyCache...)
+ err = op.Copy(ctx, srcStorage, srcObjActualPath, dstDirActualPath, lazyCache...)
+ if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.NotSupport) {
+ return nil, err
+ }
}
if ctx.Value(conf.NoTaskKey) != nil {
srcObj, err := op.Get(ctx, srcStorage, srcObjActualPath)
@@ -92,7 +102,11 @@ func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool
}
}
// not in the same storage
+ taskCreator, _ := ctx.Value("user").(*model.User)
t := &CopyTask{
+ TaskExtension: task.TaskExtension{
+ Creator: taskCreator,
+ },
srcStorage: srcStorage,
dstStorage: dstStorage,
SrcObjPath: srcObjActualPath,
@@ -123,6 +137,9 @@ func copyBetween2Storages(t *CopyTask, srcStorage, dstStorage driver.Driver, src
srcObjPath := stdpath.Join(srcObjPath, obj.GetName())
dstObjPath := stdpath.Join(dstDirPath, srcObj.GetName())
CopyTaskManager.Add(&CopyTask{
+ TaskExtension: task.TaskExtension{
+ Creator: t.GetCreator(),
+ },
srcStorage: srcStorage,
dstStorage: dstStorage,
SrcObjPath: srcObjPath,
@@ -142,6 +159,7 @@ func copyFileBetween2Storages(tsk *CopyTask, srcStorage, dstStorage driver.Drive
if err != nil {
return errors.WithMessagef(err, "failed get src [%s] file", srcFilePath)
}
+ tsk.SetTotalBytes(srcFile.GetSize())
link, _, err := op.Link(tsk.Ctx(), srcStorage, srcFilePath, model.LinkArgs{
Header: http.Header{},
})
diff --git a/internal/fs/fs.go b/internal/fs/fs.go
index 23e8a87a..01818e5f 100644
--- a/internal/fs/fs.go
+++ b/internal/fs/fs.go
@@ -2,11 +2,15 @@ package fs
import (
"context"
+ log "github.com/sirupsen/logrus"
+ "io"
+
"github.com/alist-org/alist/v3/internal/driver"
+ "github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
- log "github.com/sirupsen/logrus"
- "github.com/xhofe/tache"
+ "github.com/alist-org/alist/v3/internal/task"
+ "github.com/pkg/errors"
)
// the param named path of functions in this package is a mount path
@@ -69,7 +73,7 @@ func Move(ctx context.Context, srcPath, dstDirPath string, lazyCache ...bool) er
return err
}
-func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (tache.TaskWithInfo, error) {
+func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskExtensionInfo, error) {
res, err := _copy(ctx, srcObjPath, dstDirPath, lazyCache...)
if err != nil {
log.Errorf("failed copy %s to %s: %+v", srcObjPath, dstDirPath, err)
@@ -101,14 +105,54 @@ func PutDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer
return err
}
-func PutAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, error) {
- t, err := putAsTask(dstDirPath, file)
+func PutAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskExtensionInfo, error) {
+ t, err := putAsTask(ctx, dstDirPath, file)
if err != nil {
log.Errorf("failed put %s: %+v", dstDirPath, err)
}
return t, err
}
+func ArchiveMeta(ctx context.Context, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
+ meta, err := archiveMeta(ctx, path, args)
+ if err != nil {
+ log.Errorf("failed get archive meta %s: %+v", path, err)
+ }
+ return meta, err
+}
+
+func ArchiveList(ctx context.Context, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
+ objs, err := archiveList(ctx, path, args)
+ if err != nil {
+ log.Errorf("failed list archive [%s]%s: %+v", path, args.InnerPath, err)
+ }
+ return objs, err
+}
+
+func ArchiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) (task.TaskExtensionInfo, error) {
+ t, err := archiveDecompress(ctx, srcObjPath, dstDirPath, args, lazyCache...)
+ if err != nil {
+ log.Errorf("failed decompress [%s]%s: %+v", srcObjPath, args.InnerPath, err)
+ }
+ return t, err
+}
+
+func ArchiveDriverExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
+ l, obj, err := archiveDriverExtract(ctx, path, args)
+ if err != nil {
+ log.Errorf("failed extract [%s]%s: %+v", path, args.InnerPath, err)
+ }
+ return l, obj, err
+}
+
+func ArchiveInternalExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+ l, obj, err := archiveInternalExtract(ctx, path, args)
+ if err != nil {
+ log.Errorf("failed extract [%s]%s: %+v", path, args.InnerPath, err)
+ }
+ return l, obj, err
+}
+
type GetStoragesArgs struct {
}
@@ -127,3 +171,19 @@ func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
}
return res, err
}
+
+func PutURL(ctx context.Context, path, dstName, urlStr string) error {
+ storage, dstDirActualPath, err := op.GetStorageAndActualPath(path)
+ if err != nil {
+ return errors.WithMessage(err, "failed get storage")
+ }
+ if storage.Config().NoUpload {
+ return errors.WithStack(errs.UploadNotSupported)
+ }
+ _, ok := storage.(driver.PutURL)
+ _, okResult := storage.(driver.PutURLResult)
+ if !ok && !okResult {
+ return errs.NotImplement
+ }
+ return op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr)
+}
diff --git a/internal/fs/put.go b/internal/fs/put.go
index 807b15e0..bc33a3ac 100644
--- a/internal/fs/put.go
+++ b/internal/fs/put.go
@@ -7,12 +7,14 @@ import (
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/task"
"github.com/pkg/errors"
"github.com/xhofe/tache"
+ "time"
)
type UploadTask struct {
- tache.Base
+ task.TaskExtension
storage driver.Driver
dstDirActualPath string
file model.FileStreamer
@@ -27,13 +29,16 @@ func (t *UploadTask) GetStatus() string {
}
func (t *UploadTask) Run() error {
+ t.ClearEndTime()
+ t.SetStartTime(time.Now())
+ defer func() { t.SetEndTime(time.Now()) }()
return op.Put(t.Ctx(), t.storage, t.dstDirActualPath, t.file, t.SetProgress, true)
}
var UploadTaskManager *tache.Manager[*UploadTask]
// putAsTask add as a put task and return immediately
-func putAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, error) {
+func putAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskExtensionInfo, error) {
storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
if err != nil {
return nil, errors.WithMessage(err, "failed get storage")
@@ -49,11 +54,16 @@ func putAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo,
//file.SetReader(tempFile)
//file.SetTmpFile(tempFile)
}
+	taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when the type assertion fails
t := &UploadTask{
+ TaskExtension: task.TaskExtension{
+ Creator: taskCreator,
+ },
storage: storage,
dstDirActualPath: dstDirActualPath,
file: file,
}
+ t.SetTotalBytes(file.GetSize())
UploadTaskManager.Add(t)
return t, nil
}
diff --git a/internal/model/archive.go b/internal/model/archive.go
new file mode 100644
index 00000000..01b83691
--- /dev/null
+++ b/internal/model/archive.go
@@ -0,0 +1,53 @@
+package model
+
+import "time"
+
+type ObjTree interface {
+ Obj
+ GetChildren() []ObjTree
+}
+
+type ObjectTree struct {
+ Object
+ Children []ObjTree
+}
+
+func (t *ObjectTree) GetChildren() []ObjTree {
+ return t.Children
+}
+
+type ArchiveMeta interface {
+ GetComment() string
+	// IsEncrypted reports whether the content of the archive requires a password to access
+ // GetArchiveMeta should return errs.WrongArchivePassword if the meta-info is also encrypted,
+ // and the provided password is empty.
+ IsEncrypted() bool
+ // GetTree directly returns the full folder structure
+ // returns nil if the folder structure should be acquired by calling driver.ArchiveReader.ListArchive
+ GetTree() []ObjTree
+}
+
+type ArchiveMetaInfo struct {
+ Comment string
+ Encrypted bool
+ Tree []ObjTree
+}
+
+func (m *ArchiveMetaInfo) GetComment() string {
+ return m.Comment
+}
+
+func (m *ArchiveMetaInfo) IsEncrypted() bool {
+ return m.Encrypted
+}
+
+func (m *ArchiveMetaInfo) GetTree() []ObjTree {
+ return m.Tree
+}
+
+type ArchiveMetaProvider struct {
+ ArchiveMeta
+ *Sort
+ DriverProviding bool
+ Expiration *time.Duration
+}
diff --git a/internal/model/args.go b/internal/model/args.go
index 613699b9..f29c7e45 100644
--- a/internal/model/args.go
+++ b/internal/model/args.go
@@ -17,10 +17,11 @@ type ListArgs struct {
}
type LinkArgs struct {
- IP string
- Header http.Header
- Type string
- HttpReq *http.Request
+ IP string
+ Header http.Header
+ Type string
+ HttpReq *http.Request
+ Redirect bool
}
type Link struct {
@@ -48,6 +49,33 @@ type FsOtherArgs struct {
Method string `json:"method" form:"method"`
Data interface{} `json:"data" form:"data"`
}
+
+type ArchiveArgs struct {
+ Password string
+ LinkArgs
+}
+
+type ArchiveInnerArgs struct {
+ ArchiveArgs
+ InnerPath string
+}
+
+type ArchiveMetaArgs struct {
+ ArchiveArgs
+ Refresh bool
+}
+
+type ArchiveListArgs struct {
+ ArchiveInnerArgs
+ Refresh bool
+}
+
+type ArchiveDecompressArgs struct {
+ ArchiveInnerArgs
+ CacheFull bool
+ PutIntoNewDir bool
+}
+
type RangeReadCloserIF interface {
RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
utils.ClosersIF
@@ -60,7 +88,7 @@ type RangeReadCloser struct {
utils.Closers
}
-func (r RangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+func (r *RangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
rc, err := r.RangeReader(ctx, httpRange)
r.Closers.Add(rc)
return rc, err
diff --git a/internal/model/obj.go b/internal/model/obj.go
index 122fb546..f0fce7a1 100644
--- a/internal/model/obj.go
+++ b/internal/model/obj.go
@@ -2,6 +2,7 @@ package model
import (
"io"
+ "os"
"sort"
"strings"
"time"
@@ -48,8 +49,12 @@ type FileStreamer interface {
RangeRead(http_range.Range) (io.Reader, error)
//for a non-seekable Stream, if Read is called, this function won't work
CacheFullInTempFile() (File, error)
+ SetTmpFile(r *os.File)
+ GetFile() File
}
+type UpdateProgress func(percentage float64)
+
type URL interface {
URL() string
}
@@ -112,12 +117,12 @@ func ExtractFolder(objs []Obj, extractFolder string) {
}
func WrapObjName(objs Obj) Obj {
- return &ObjWrapName{Obj: objs}
+ return &ObjWrapName{Name: utils.MappingName(objs.GetName()), Obj: objs}
}
func WrapObjsName(objs []Obj) {
for i := 0; i < len(objs); i++ {
- objs[i] = &ObjWrapName{Obj: objs[i]}
+ objs[i] = &ObjWrapName{Name: utils.MappingName(objs[i].GetName()), Obj: objs[i]}
}
}
diff --git a/internal/model/object.go b/internal/model/object.go
index 93f2c307..c8c10bb9 100644
--- a/internal/model/object.go
+++ b/internal/model/object.go
@@ -16,9 +16,6 @@ func (o *ObjWrapName) Unwrap() Obj {
}
func (o *ObjWrapName) GetName() string {
- if o.Name == "" {
- o.Name = utils.MappingName(o.Obj.GetName())
- }
return o.Name
}
diff --git a/internal/model/setting.go b/internal/model/setting.go
index c474935e..93b81fe5 100644
--- a/internal/model/setting.go
+++ b/internal/model/setting.go
@@ -11,6 +11,8 @@ const (
SSO
LDAP
S3
+ FTP
+ TRAFFIC
)
const (
diff --git a/internal/model/sshkey.go b/internal/model/sshkey.go
new file mode 100644
index 00000000..6e97c103
--- /dev/null
+++ b/internal/model/sshkey.go
@@ -0,0 +1,28 @@
+package model
+
+import (
+ "golang.org/x/crypto/ssh"
+ "time"
+)
+
+type SSHPublicKey struct {
+ ID uint `json:"id" gorm:"primaryKey"`
+ UserId uint `json:"-"`
+ Title string `json:"title"`
+ Fingerprint string `json:"fingerprint"`
+ KeyStr string `gorm:"type:text" json:"-"`
+ AddedTime time.Time `json:"added_time"`
+ LastUsedTime time.Time `json:"last_used_time"`
+}
+
+func (k *SSHPublicKey) GetKey() (ssh.PublicKey, error) {
+ pubKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k.KeyStr))
+ if err != nil {
+ return nil, err
+ }
+ return pubKey, nil
+}
+
+func (k *SSHPublicKey) UpdateLastUsedTime() {
+ k.LastUsedTime = time.Now()
+}
diff --git a/internal/model/storage.go b/internal/model/storage.go
index 14bcf45f..e3c7e1f9 100644
--- a/internal/model/storage.go
+++ b/internal/model/storage.go
@@ -1,6 +1,8 @@
package model
-import "time"
+import (
+ "time"
+)
type Storage struct {
ID uint `json:"id" gorm:"primaryKey"` // unique key
@@ -13,6 +15,7 @@ type Storage struct {
Remark string `json:"remark"`
Modified time.Time `json:"modified"`
Disabled bool `json:"disabled"` // if disabled
+ DisableIndex bool `json:"disable_index"`
EnableSign bool `json:"enable_sign"`
Sort
Proxy
diff --git a/internal/model/user.go b/internal/model/user.go
index 2d61a971..eaa0fed9 100644
--- a/internal/model/user.go
+++ b/internal/model/user.go
@@ -32,16 +32,20 @@ type User struct {
Role int `json:"role"` // user's role
Disabled bool `json:"disabled"`
// Determine permissions by bit
- // 0: can see hidden files
- // 1: can access without password
- // 2: can add offline download tasks
- // 3: can mkdir and upload
- // 4: can rename
- // 5: can move
- // 6: can copy
- // 7: can remove
- // 8: webdav read
- // 9: webdav write
+ // 0: can see hidden files
+ // 1: can access without password
+ // 2: can add offline download tasks
+ // 3: can mkdir and upload
+ // 4: can rename
+ // 5: can move
+ // 6: can copy
+ // 7: can remove
+ // 8: webdav read
+ // 9: webdav write
+ // 10: ftp/sftp login and read
+ // 11: ftp/sftp write
+ // 12: can read archives
+ // 13: can decompress archives
Permission int32 `json:"permission"`
OtpSecret string `json:"-"`
SsoID string `json:"sso_id"` // unique by sso platform
@@ -78,43 +82,59 @@ func (u *User) SetPassword(pwd string) *User {
}
func (u *User) CanSeeHides() bool {
- return u.IsAdmin() || u.Permission&1 == 1
+ return u.Permission&1 == 1
}
func (u *User) CanAccessWithoutPassword() bool {
- return u.IsAdmin() || (u.Permission>>1)&1 == 1
+ return (u.Permission>>1)&1 == 1
}
func (u *User) CanAddOfflineDownloadTasks() bool {
- return u.IsAdmin() || (u.Permission>>2)&1 == 1
+ return (u.Permission>>2)&1 == 1
}
func (u *User) CanWrite() bool {
- return u.IsAdmin() || (u.Permission>>3)&1 == 1
+ return (u.Permission>>3)&1 == 1
}
func (u *User) CanRename() bool {
- return u.IsAdmin() || (u.Permission>>4)&1 == 1
+ return (u.Permission>>4)&1 == 1
}
func (u *User) CanMove() bool {
- return u.IsAdmin() || (u.Permission>>5)&1 == 1
+ return (u.Permission>>5)&1 == 1
}
func (u *User) CanCopy() bool {
- return u.IsAdmin() || (u.Permission>>6)&1 == 1
+ return (u.Permission>>6)&1 == 1
}
func (u *User) CanRemove() bool {
- return u.IsAdmin() || (u.Permission>>7)&1 == 1
+ return (u.Permission>>7)&1 == 1
}
func (u *User) CanWebdavRead() bool {
- return u.IsAdmin() || (u.Permission>>8)&1 == 1
+ return (u.Permission>>8)&1 == 1
}
func (u *User) CanWebdavManage() bool {
- return u.IsAdmin() || (u.Permission>>9)&1 == 1
+ return (u.Permission>>9)&1 == 1
+}
+
+func (u *User) CanFTPAccess() bool {
+ return (u.Permission>>10)&1 == 1
+}
+
+func (u *User) CanFTPManage() bool {
+ return (u.Permission>>11)&1 == 1
+}
+
+func (u *User) CanReadArchives() bool {
+ return (u.Permission>>12)&1 == 1
+}
+
+func (u *User) CanDecompress() bool {
+ return (u.Permission>>13)&1 == 1
}
func (u *User) JoinPath(reqPath string) (string, error) {
diff --git a/internal/net/request.go b/internal/net/request.go
index 088ff66a..a1ff6d20 100644
--- a/internal/net/request.go
+++ b/internal/net/request.go
@@ -4,15 +4,15 @@ import (
"bytes"
"context"
"fmt"
- "github.com/alist-org/alist/v3/pkg/utils"
"io"
- "math"
"net/http"
"strconv"
"strings"
"sync"
"time"
+ "github.com/alist-org/alist/v3/pkg/utils"
+
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/aws/aws-sdk-go/aws/awsutil"
log "github.com/sirupsen/logrus"
@@ -20,7 +20,7 @@ import (
// DefaultDownloadPartSize is the default range of bytes to get at a time when
// using Download().
-const DefaultDownloadPartSize = 1024 * 1024 * 10
+const DefaultDownloadPartSize = utils.MB * 10
// DefaultDownloadConcurrency is the default number of goroutines to spin up
// when using Download().
@@ -29,6 +29,8 @@ const DefaultDownloadConcurrency = 2
// DefaultPartBodyMaxRetries is the default number of retries to make when a part fails to download.
const DefaultPartBodyMaxRetries = 3
+var DefaultConcurrencyLimit *ConcurrencyLimit
+
type Downloader struct {
PartSize int
@@ -43,15 +45,15 @@ type Downloader struct {
//RequestParam HttpRequestParams
HttpClient HttpRequestFunc
+
+ *ConcurrencyLimit
}
type HttpRequestFunc func(ctx context.Context, params *HttpRequestParams) (*http.Response, error)
func NewDownloader(options ...func(*Downloader)) *Downloader {
- d := &Downloader{
- HttpClient: DefaultHttpRequestFunc,
- PartSize: DefaultDownloadPartSize,
+ d := &Downloader{ //允许不设置的选项
PartBodyMaxRetries: DefaultPartBodyMaxRetries,
- Concurrency: DefaultDownloadConcurrency,
+ ConcurrencyLimit: DefaultConcurrencyLimit,
}
for _, option := range options {
option(d)
@@ -73,16 +75,16 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo
impl := downloader{params: &finalP, cfg: d, ctx: ctx}
// Ensures we don't need nil checks later on
-
- impl.partBodyMaxRetries = d.PartBodyMaxRetries
-
+ // 必需的选项
if impl.cfg.Concurrency == 0 {
impl.cfg.Concurrency = DefaultDownloadConcurrency
}
-
if impl.cfg.PartSize == 0 {
impl.cfg.PartSize = DefaultDownloadPartSize
}
+ if impl.cfg.HttpClient == nil {
+ impl.cfg.HttpClient = DefaultHttpRequestFunc
+ }
return impl.download()
}
@@ -90,7 +92,7 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo
// downloader is the implementation structure used internally by Downloader.
type downloader struct {
ctx context.Context
- cancel context.CancelFunc
+ cancel context.CancelCauseFunc
cfg Downloader
params *HttpRequestParams //http request params
@@ -100,38 +102,78 @@ type downloader struct {
m sync.Mutex
nextChunk int //next chunk id
- chunks []chunk
bufs []*Buf
- //totalBytes int64
- written int64 //total bytes of file downloaded from remote
- err error
+ written int64 //total bytes of file downloaded from remote
+ err error
- partBodyMaxRetries int
+ concurrency int //剩余的并发数,递减。到0时停止并发
+ maxPart int //有多少个分片
+ pos int64
+ maxPos int64
+ m2 sync.Mutex
+ readingID int // 正在被读取的id
+}
+
+type ConcurrencyLimit struct {
+ _m sync.Mutex
+ Limit int // 需要大于0
+}
+
+var ErrExceedMaxConcurrency = fmt.Errorf("ExceedMaxConcurrency")
+
+func (l *ConcurrencyLimit) sub() error {
+ l._m.Lock()
+ defer l._m.Unlock()
+ if l.Limit-1 < 0 {
+ return ErrExceedMaxConcurrency
+ }
+ l.Limit--
+ // log.Debugf("ConcurrencyLimit.sub: %d", l.Limit)
+ return nil
+}
+func (l *ConcurrencyLimit) add() {
+ l._m.Lock()
+ defer l._m.Unlock()
+ l.Limit++
+ // log.Debugf("ConcurrencyLimit.add: %d", l.Limit)
+}
+
+// 检测是否超过限制
+func (d *downloader) concurrencyCheck() error {
+ if d.cfg.ConcurrencyLimit != nil {
+ return d.cfg.ConcurrencyLimit.sub()
+ }
+ return nil
+}
+func (d *downloader) concurrencyFinish() {
+ if d.cfg.ConcurrencyLimit != nil {
+ d.cfg.ConcurrencyLimit.add()
+ }
}
// download performs the implementation of the object download across ranged GETs.
func (d *downloader) download() (io.ReadCloser, error) {
- d.ctx, d.cancel = context.WithCancel(d.ctx)
+ if err := d.concurrencyCheck(); err != nil {
+ return nil, err
+ }
+ d.ctx, d.cancel = context.WithCancelCause(d.ctx)
- pos := d.params.Range.Start
- maxPos := d.params.Range.Start + d.params.Range.Length
- id := 0
- for pos < maxPos {
- finalSize := int64(d.cfg.PartSize)
- //check boundary
- if pos+finalSize > maxPos {
- finalSize = maxPos - pos
- }
- c := chunk{start: pos, size: finalSize, id: id}
- d.chunks = append(d.chunks, c)
- pos += finalSize
- id++
+ maxPart := int(d.params.Range.Length / int64(d.cfg.PartSize))
+ if d.params.Range.Length%int64(d.cfg.PartSize) > 0 {
+ maxPart++
}
- if len(d.chunks) < d.cfg.Concurrency {
- d.cfg.Concurrency = len(d.chunks)
+ if maxPart < d.cfg.Concurrency {
+ d.cfg.Concurrency = maxPart
}
+ log.Debugf("cfgConcurrency:%d", d.cfg.Concurrency)
if d.cfg.Concurrency == 1 {
+ if d.cfg.ConcurrencyLimit != nil {
+ go func() {
+ <-d.ctx.Done()
+ d.concurrencyFinish()
+ }()
+ }
resp, err := d.cfg.HttpClient(d.ctx, d.params)
if err != nil {
return nil, err
@@ -142,60 +184,115 @@ func (d *downloader) download() (io.ReadCloser, error) {
// workers
d.chunkChannel = make(chan chunk, d.cfg.Concurrency)
- for i := 0; i < d.cfg.Concurrency; i++ {
- buf := NewBuf(d.ctx, d.cfg.PartSize, i)
- d.bufs = append(d.bufs, buf)
- go d.downloadPart()
- }
- // initial tasks
- for i := 0; i < d.cfg.Concurrency; i++ {
- d.sendChunkTask()
- }
+ d.maxPart = maxPart
+ d.pos = d.params.Range.Start
+ d.maxPos = d.params.Range.Start + d.params.Range.Length
+ d.concurrency = d.cfg.Concurrency
+ d.sendChunkTask(true)
- var rc io.ReadCloser = NewMultiReadCloser(d.chunks[0].buf, d.interrupt, d.finishBuf)
+ var rc io.ReadCloser = NewMultiReadCloser(d.bufs[0], d.interrupt, d.finishBuf)
// Return error
return rc, d.err
}
-func (d *downloader) sendChunkTask() *chunk {
- ch := &d.chunks[d.nextChunk]
- ch.buf = d.getBuf(d.nextChunk)
- ch.buf.Reset(int(ch.size))
- d.chunkChannel <- *ch
- d.nextChunk++
- return ch
+
+func (d *downloader) sendChunkTask(newConcurrency bool) error {
+ d.m.Lock()
+ defer d.m.Unlock()
+ isNewBuf := d.concurrency > 0
+ if newConcurrency {
+ if d.concurrency <= 0 {
+ return nil
+ }
+ if d.nextChunk > 0 { // 第一个不检查,因为已经检查过了
+ if err := d.concurrencyCheck(); err != nil {
+ return err
+ }
+ }
+ d.concurrency--
+ go d.downloadPart()
+ }
+
+ var buf *Buf
+ if isNewBuf {
+ buf = NewBuf(d.ctx, d.cfg.PartSize)
+ d.bufs = append(d.bufs, buf)
+ } else {
+ buf = d.getBuf(d.nextChunk)
+ }
+
+ if d.pos < d.maxPos {
+ finalSize := int64(d.cfg.PartSize)
+ switch d.nextChunk {
+ case 0:
+ // 最小分片在前面有助视频播放?
+ firstSize := d.params.Range.Length % finalSize
+ if firstSize > 0 {
+ minSize := finalSize / 2
+ if firstSize < minSize { // 最小分片太小就调整到一半
+ finalSize = minSize
+ } else {
+ finalSize = firstSize
+ }
+ }
+ case 1:
+ firstSize := d.params.Range.Length % finalSize
+ minSize := finalSize / 2
+ if firstSize > 0 && firstSize < minSize {
+ finalSize += firstSize - minSize
+ }
+ }
+ buf.Reset(int(finalSize))
+ ch := chunk{
+ start: d.pos,
+ size: finalSize,
+ id: d.nextChunk,
+ buf: buf,
+
+ newConcurrency: newConcurrency,
+ }
+ d.pos += finalSize
+ d.nextChunk++
+ d.chunkChannel <- ch
+ return nil
+ }
+ return nil
}
// when the final reader Close, we interrupt
func (d *downloader) interrupt() error {
- d.cancel()
if d.written != d.params.Range.Length {
log.Debugf("Downloader interrupt before finish")
if d.getErr() == nil {
d.setErr(fmt.Errorf("interrupted"))
}
}
+ d.cancel(d.err)
defer func() {
close(d.chunkChannel)
for _, buf := range d.bufs {
buf.Close()
}
+ if d.concurrency > 0 {
+ d.concurrency = -d.concurrency
+ }
+ log.Debugf("maxConcurrency:%d", d.cfg.Concurrency+d.concurrency)
}()
return d.err
}
func (d *downloader) getBuf(id int) (b *Buf) {
-
- return d.bufs[id%d.cfg.Concurrency]
+ return d.bufs[id%len(d.bufs)]
}
-func (d *downloader) finishBuf(id int) (isLast bool, buf *Buf) {
- if id >= len(d.chunks)-1 {
+func (d *downloader) finishBuf(id int) (isLast bool, nextBuf *Buf) {
+ id++
+ if id >= d.maxPart {
return true, nil
}
- if d.nextChunk > id+1 {
- return false, d.getBuf(id + 1)
- }
- ch := d.sendChunkTask()
- return false, ch.buf
+
+ d.sendChunkTask(false)
+
+ d.readingID = id
+ return false, d.getBuf(id)
}
// downloadPart is an individual goroutine worker reading from the ch channel
@@ -210,58 +307,122 @@ func (d *downloader) downloadPart() {
if d.getErr() != nil {
// Drain the channel if there is an error, to prevent deadlocking
// of download producer.
- continue
+ break
}
- log.Debugf("downloadPart tried to get chunk")
if err := d.downloadChunk(&c); err != nil {
+ if err == errCancelConcurrency {
+ break
+ }
+ if err == context.Canceled {
+ if e := context.Cause(d.ctx); e != nil {
+ err = e
+ }
+ }
d.setErr(err)
+ d.cancel(err)
}
}
+ d.concurrencyFinish()
}
// downloadChunk downloads the chunk
func (d *downloader) downloadChunk(ch *chunk) error {
- log.Debugf("start new chunk %+v buffer_id =%d", ch, ch.id)
+ log.Debugf("start chunk_%d, %+v", ch.id, ch)
+ params := d.getParamsFromChunk(ch)
var n int64
var err error
- params := d.getParamsFromChunk(ch)
- for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
+ for retry := 0; retry <= d.cfg.PartBodyMaxRetries; retry++ {
if d.getErr() != nil {
- return d.getErr()
+ return nil
}
n, err = d.tryDownloadChunk(params, ch)
if err == nil {
+ d.incrWritten(n)
+ log.Debugf("chunk_%d downloaded", ch.id)
break
}
- // Check if the returned error is an errReadingBody.
- // If err is errReadingBody this indicates that an error
- // occurred while copying the http response body.
+ if d.getErr() != nil {
+ return nil
+ }
+ if utils.IsCanceled(d.ctx) {
+ return d.ctx.Err()
+ }
+ // Check if the returned error is an errNeedRetry.
// If this occurs we unwrap the err to set the underlying error
// and attempt any remaining retries.
- if bodyErr, ok := err.(*errReadingBody); ok {
- err = bodyErr.Unwrap()
+ if e, ok := err.(*errNeedRetry); ok {
+ err = e.Unwrap()
+ if n > 0 {
+ // 测试:下载时 断开 alist向云盘发起的下载连接
+ // 校验:下载完后校验文件哈希值 一致
+ d.incrWritten(n)
+ ch.start += n
+ ch.size -= n
+ params.Range.Start = ch.start
+ params.Range.Length = ch.size
+ }
+ log.Warnf("err chunk_%d, object part download error %s, retrying attempt %d. %v",
+ ch.id, params.URL, retry, err)
+ } else if err == errInfiniteRetry {
+ retry--
+ continue
} else {
- return err
+ break
}
-
- //ch.cur = 0
-
- log.Debugf("object part body download interrupted %s, err, %v, retrying attempt %d",
- params.URL, err, retry)
}
- d.incrWritten(n)
- log.Debugf("down_%d downloaded chunk", ch.id)
- //ch.buf.buffer.wg1.Wait()
- //log.Debugf("down_%d downloaded chunk,wg wait passed", ch.id)
return err
}
-func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
+var errCancelConcurrency = fmt.Errorf("cancel concurrency")
+var errInfiniteRetry = fmt.Errorf("infinite retry")
+func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
resp, err := d.cfg.HttpClient(d.ctx, params)
if err != nil {
- return 0, err
+ if resp == nil {
+ return 0, err
+ }
+ if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
+ return 0, err
+ }
+ if ch.id == 0 { //第1个任务 有限的重试,超过重试就会结束请求
+ switch resp.StatusCode {
+ default:
+ return 0, err
+ case http.StatusTooManyRequests:
+ case http.StatusBadGateway:
+ case http.StatusServiceUnavailable:
+ case http.StatusGatewayTimeout:
+ }
+ <-time.After(time.Millisecond * 200)
+ return 0, &errNeedRetry{err: fmt.Errorf("http request failure,status: %d", resp.StatusCode)}
+ }
+
+ // 来到这 说明第1个分片下载 连接成功了
+ // 后续分片下载出错都当超载处理
+ log.Debugf("err chunk_%d, try downloading:%v", ch.id, err)
+
+ d.m.Lock()
+ isCancelConcurrency := ch.newConcurrency
+ if d.concurrency > 0 { // 取消剩余的并发任务
+ // 用于计算实际的并发数
+ d.concurrency = -d.concurrency
+ isCancelConcurrency = true
+ }
+ if isCancelConcurrency {
+ d.concurrency--
+ d.chunkChannel <- *ch
+ d.m.Unlock()
+ return 0, errCancelConcurrency
+ }
+ d.m.Unlock()
+ if ch.id != d.readingID { //正在被读取的优先重试
+ d.m2.Lock()
+ defer d.m2.Unlock()
+ <-time.After(time.Millisecond * 200)
+ }
+ return 0, errInfiniteRetry
}
defer resp.Body.Close()
//only check file size on the first task
@@ -271,15 +432,15 @@ func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int
return 0, err
}
}
-
+ d.sendChunkTask(true)
n, err := utils.CopyWithBuffer(ch.buf, resp.Body)
if err != nil {
- return n, &errReadingBody{err: err}
+ return n, &errNeedRetry{err: err}
}
if n != ch.size {
err = fmt.Errorf("chunk download size incorrect, expected=%d, got=%d", ch.size, n)
- return n, &errReadingBody{err: err}
+ return n, &errNeedRetry{err: err}
}
return n, nil
@@ -295,7 +456,7 @@ func (d *downloader) getParamsFromChunk(ch *chunk) *HttpRequestParams {
func (d *downloader) checkTotalBytes(resp *http.Response) error {
var err error
- var totalBytes int64 = math.MinInt64
+ totalBytes := int64(-1)
contentRange := resp.Header.Get("Content-Range")
if len(contentRange) == 0 {
// ContentRange is nil when the full file contents is provided, and
@@ -327,8 +488,9 @@ func (d *downloader) checkTotalBytes(resp *http.Response) error {
err = fmt.Errorf("expect file size=%d unmatch remote report size=%d, need refresh cache", d.params.Size, totalBytes)
}
if err != nil {
- _ = d.interrupt()
+ // _ = d.interrupt()
d.setErr(err)
+ d.cancel(err)
}
return err
@@ -367,9 +529,7 @@ type chunk struct {
buf *Buf
id int
- // Downloader takes range (start,length), but this chunk is requesting equal/sub range of it.
- // To convert the writer to reader eventually, we need to write within the boundary
- //boundary http_range.Range
+ newConcurrency bool
}
func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*http.Response, error) {
@@ -377,7 +537,7 @@ func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*ht
res, err := RequestHttp(ctx, "GET", header, params.URL)
if err != nil {
- return nil, err
+ return res, err
}
return res, nil
}
@@ -390,15 +550,15 @@ type HttpRequestParams struct {
//total file size
Size int64
}
-type errReadingBody struct {
+type errNeedRetry struct {
err error
}
-func (e *errReadingBody) Error() string {
- return fmt.Sprintf("failed to read part body: %v", e.err)
+func (e *errNeedRetry) Error() string {
+ return e.err.Error()
}
-func (e *errReadingBody) Unwrap() error {
+func (e *errNeedRetry) Unwrap() error {
return e.err
}
@@ -436,9 +596,13 @@ func (mr MultiReadCloser) Read(p []byte) (n int, err error) {
}
mr.cfg.curBuf = next
mr.cfg.rPos++
- //current.Close()
return n, nil
}
+ if err == context.Canceled {
+ if e := context.Cause(mr.cfg.curBuf.ctx); e != nil {
+ err = e
+ }
+ }
return n, err
}
func (mr MultiReadCloser) Close() error {
@@ -451,18 +615,15 @@ type Buf struct {
ctx context.Context
off int
rw sync.Mutex
- //notify chan struct{}
}
// NewBuf is a buffer that can have 1 read & 1 write at the same time.
// when read is faster write, immediately feed data to read after written
-func NewBuf(ctx context.Context, maxSize int, id int) *Buf {
- d := make([]byte, 0, maxSize)
+func NewBuf(ctx context.Context, maxSize int) *Buf {
return &Buf{
ctx: ctx,
- buffer: bytes.NewBuffer(d),
+ buffer: bytes.NewBuffer(make([]byte, 0, maxSize)),
size: maxSize,
- //notify: make(chan struct{}),
}
}
func (br *Buf) Reset(size int) {
@@ -500,8 +661,6 @@ func (br *Buf) Read(p []byte) (n int, err error) {
select {
case <-br.ctx.Done():
return 0, br.ctx.Err()
- //case <-br.notify:
- // return 0, nil
case <-time.After(time.Millisecond * 200):
return 0, nil
}
@@ -514,13 +673,9 @@ func (br *Buf) Write(p []byte) (n int, err error) {
br.rw.Lock()
defer br.rw.Unlock()
n, err = br.buffer.Write(p)
- select {
- //case br.notify <- struct{}{}:
- default:
- }
return
}
func (br *Buf) Close() {
- //close(br.notify)
+ br.buffer = nil
}
diff --git a/internal/net/serve.go b/internal/net/serve.go
index 0eb8cbb8..bdeac0ac 100644
--- a/internal/net/serve.go
+++ b/internal/net/serve.go
@@ -3,6 +3,7 @@ package net
import (
"compress/gzip"
"context"
+ "crypto/tls"
"fmt"
"io"
"mime"
@@ -14,7 +15,6 @@ import (
"sync"
"time"
- "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
@@ -52,18 +52,19 @@ import (
//
// If the caller has set w's ETag header formatted per RFC 7232, section 2.3,
// ServeHTTP uses it to handle requests using If-Match, If-None-Match, or If-Range.
-func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time.Time, size int64, RangeReaderFunc model.RangeReaderFunc) {
+func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time.Time, size int64, RangeReadCloser model.RangeReadCloserIF) error {
+ defer RangeReadCloser.Close()
setLastModified(w, modTime)
done, rangeReq := checkPreconditions(w, r, modTime)
if done {
- return
+ return nil
}
if size < 0 {
// since too many functions need file size to work,
// will not implement the support of unknown file size here
http.Error(w, "negative content size not supported", http.StatusInternalServerError)
- return
+ return nil
}
code := http.StatusOK
@@ -102,7 +103,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
fallthrough
default:
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
- return
+ return nil
}
if sumRangesSize(ranges) > size {
@@ -110,12 +111,20 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
// or unknown file size, ignore the range request.
ranges = nil
}
+
+ // 使用请求的Context
+ // 不然从sendContent读不到数据,即使请求断开CopyBuffer也会一直堵塞
+ ctx := context.WithValue(r.Context(), "request_header", r.Header)
switch {
case len(ranges) == 0:
- reader, err := RangeReaderFunc(context.Background(), http_range.Range{Length: -1})
+ reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1})
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ code = http.StatusRequestedRangeNotSatisfiable
+ if err == ErrExceedMaxConcurrency {
+ code = http.StatusTooManyRequests
+ }
+ http.Error(w, err.Error(), code)
+ return nil
}
sendContent = reader
case len(ranges) == 1:
@@ -131,10 +140,14 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
// does not request multiple parts might not support
// multipart responses."
ra := ranges[0]
- sendContent, err = RangeReaderFunc(context.Background(), ra)
+ sendContent, err = RangeReadCloser.RangeRead(ctx, ra)
if err != nil {
- http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
- return
+ code = http.StatusRequestedRangeNotSatisfiable
+ if err == ErrExceedMaxConcurrency {
+ code = http.StatusTooManyRequests
+ }
+ http.Error(w, err.Error(), code)
+ return nil
}
sendSize = ra.Length
code = http.StatusPartialContent
@@ -158,7 +171,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
pw.CloseWithError(err)
return
}
- reader, err := RangeReaderFunc(context.Background(), ra)
+ reader, err := RangeReadCloser.RangeRead(ctx, ra)
if err != nil {
pw.CloseWithError(err)
return
@@ -167,14 +180,12 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
pw.CloseWithError(err)
return
}
- //defer reader.Close()
}
mw.Close()
pw.Close()
}()
}
- defer sendContent.Close()
w.Header().Set("Accept-Ranges", "bytes")
if w.Header().Get("Content-Encoding") == "" {
@@ -190,9 +201,15 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
if written != sendSize {
log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish. written bytes: %d ,sendSize:%d, ", written, sendSize)
}
- http.Error(w, err.Error(), http.StatusInternalServerError)
+ code = http.StatusInternalServerError
+ if err == ErrExceedMaxConcurrency {
+ code = http.StatusTooManyRequests
+ }
+ w.WriteHeader(code)
+ return err
}
}
+ return nil
}
func ProcessHeader(origin, override http.Header) http.Header {
result := http.Header{}
@@ -239,7 +256,7 @@ func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Hea
_ = res.Body.Close()
msg := string(all)
log.Debugln(msg)
- return nil, fmt.Errorf("http request [%s] failure,status: %d response:%s", URL, res.StatusCode, msg)
+ return res, fmt.Errorf("http request [%s] failure,status: %d response:%s", URL, res.StatusCode, msg)
}
return res, nil
}
@@ -249,7 +266,7 @@ var httpClient *http.Client
func HttpClient() *http.Client {
once.Do(func() {
- httpClient = base.NewHttpClient()
+ httpClient = NewHttpClient()
httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
@@ -260,3 +277,13 @@ func HttpClient() *http.Client {
})
return httpClient
}
+
+func NewHttpClient() *http.Client {
+ return &http.Client{
+ Timeout: time.Hour * 48,
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
+ },
+ }
+}
diff --git a/internal/net/util.go b/internal/net/util.go
index 44201859..5b335a7f 100644
--- a/internal/net/util.go
+++ b/internal/net/util.go
@@ -2,7 +2,6 @@ package net
import (
"fmt"
- "github.com/alist-org/alist/v3/pkg/utils"
"io"
"math"
"mime/multipart"
@@ -11,6 +10,8 @@ import (
"strings"
"time"
+ "github.com/alist-org/alist/v3/pkg/utils"
+
"github.com/alist-org/alist/v3/pkg/http_range"
log "github.com/sirupsen/logrus"
)
@@ -70,6 +71,7 @@ func checkIfMatch(w http.ResponseWriter, r *http.Request) condResult {
if im == "" {
return condNone
}
+ r.Header.Del("If-Match")
for {
im = textproto.TrimString(im)
if len(im) == 0 {
@@ -97,7 +99,11 @@ func checkIfMatch(w http.ResponseWriter, r *http.Request) condResult {
func checkIfUnmodifiedSince(r *http.Request, modtime time.Time) condResult {
ius := r.Header.Get("If-Unmodified-Since")
- if ius == "" || isZeroTime(modtime) {
+ if ius == "" {
+ return condNone
+ }
+ r.Header.Del("If-Unmodified-Since")
+ if isZeroTime(modtime) {
return condNone
}
t, err := http.ParseTime(ius)
@@ -119,6 +125,7 @@ func checkIfNoneMatch(w http.ResponseWriter, r *http.Request) condResult {
if inm == "" {
return condNone
}
+ r.Header.Del("If-None-Match")
buf := inm
for {
buf = textproto.TrimString(buf)
@@ -149,7 +156,11 @@ func checkIfModifiedSince(r *http.Request, modtime time.Time) condResult {
return condNone
}
ims := r.Header.Get("If-Modified-Since")
- if ims == "" || isZeroTime(modtime) {
+ if ims == "" {
+ return condNone
+ }
+ r.Header.Del("If-Modified-Since")
+ if isZeroTime(modtime) {
return condNone
}
t, err := http.ParseTime(ims)
@@ -173,6 +184,7 @@ func checkIfRange(w http.ResponseWriter, r *http.Request, modtime time.Time) con
if ir == "" {
return condNone
}
+ r.Header.Del("If-Range")
etag, _ := scanETag(ir)
if etag != "" {
if etagStrongMatch(etag, w.Header().Get("Etag")) {
diff --git a/internal/offline_download/115/client.go b/internal/offline_download/115/client.go
index 0ebf38ff..3f9d804d 100644
--- a/internal/offline_download/115/client.go
+++ b/internal/offline_download/115/client.go
@@ -3,6 +3,8 @@ package _115
import (
"context"
"fmt"
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/drivers/115"
"github.com/alist-org/alist/v3/internal/errs"
@@ -33,13 +35,23 @@ func (p *Cloud115) Init() (string, error) {
}
func (p *Cloud115) IsReady() bool {
+ tempDir := setting.GetStr(conf.Pan115TempDir)
+ if tempDir == "" {
+ return false
+ }
+ storage, _, err := op.GetStorageAndActualPath(tempDir)
+ if err != nil {
+ return false
+ }
+ if _, ok := storage.(*_115.Pan115); !ok {
+ return false
+ }
return true
}
func (p *Cloud115) AddURL(args *tool.AddUrlArgs) (string, error) {
// 添加新任务刷新缓存
p.refreshTaskCache = true
- // args.TempDir 已经被修改为了 DstDirPath
storage, actualPath, err := op.GetStorageAndActualPath(args.TempDir)
if err != nil {
return "", err
@@ -50,6 +62,11 @@ func (p *Cloud115) AddURL(args *tool.AddUrlArgs) (string, error) {
}
ctx := context.Background()
+
+ if err := op.MakeDir(ctx, storage, actualPath); err != nil {
+ return "", err
+ }
+
parentDir, err := op.GetUnwrap(ctx, storage, actualPath)
if err != nil {
return "", err
@@ -64,7 +81,7 @@ func (p *Cloud115) AddURL(args *tool.AddUrlArgs) (string, error) {
}
func (p *Cloud115) Remove(task *tool.DownloadTask) error {
- storage, _, err := op.GetStorageAndActualPath(task.DstDirPath)
+ storage, _, err := op.GetStorageAndActualPath(task.TempDir)
if err != nil {
return err
}
@@ -81,7 +98,7 @@ func (p *Cloud115) Remove(task *tool.DownloadTask) error {
}
func (p *Cloud115) Status(task *tool.DownloadTask) (*tool.Status, error) {
- storage, _, err := op.GetStorageAndActualPath(task.DstDirPath)
+ storage, _, err := op.GetStorageAndActualPath(task.TempDir)
if err != nil {
return nil, err
}
@@ -107,6 +124,7 @@ func (p *Cloud115) Status(task *tool.DownloadTask) (*tool.Status, error) {
s.Progress = t.Percent
s.Status = t.GetStatus()
s.Completed = t.IsDone()
+ s.TotalBytes = t.Size
if t.IsFailed() {
s.Err = fmt.Errorf(t.GetStatus())
}
diff --git a/internal/offline_download/all.go b/internal/offline_download/all.go
index ee80b5a0..3d0c7c73 100644
--- a/internal/offline_download/all.go
+++ b/internal/offline_download/all.go
@@ -6,4 +6,6 @@ import (
_ "github.com/alist-org/alist/v3/internal/offline_download/http"
_ "github.com/alist-org/alist/v3/internal/offline_download/pikpak"
_ "github.com/alist-org/alist/v3/internal/offline_download/qbit"
+ _ "github.com/alist-org/alist/v3/internal/offline_download/thunder"
+ _ "github.com/alist-org/alist/v3/internal/offline_download/transmission"
)
diff --git a/internal/offline_download/aria2/aria2.go b/internal/offline_download/aria2/aria2.go
index d22b32f9..fb212b35 100644
--- a/internal/offline_download/aria2/aria2.go
+++ b/internal/offline_download/aria2/aria2.go
@@ -82,7 +82,7 @@ func (a *Aria2) Status(task *tool.DownloadTask) (*tool.Status, error) {
if err != nil {
return nil, err
}
- total, err := strconv.ParseUint(info.TotalLength, 10, 64)
+ total, err := strconv.ParseInt(info.TotalLength, 10, 64)
if err != nil {
total = 0
}
@@ -91,8 +91,9 @@ func (a *Aria2) Status(task *tool.DownloadTask) (*tool.Status, error) {
downloaded = 0
}
s := &tool.Status{
- Completed: info.Status == "complete",
- Err: err,
+ Completed: info.Status == "complete",
+ Err: err,
+ TotalBytes: total,
}
s.Progress = float64(downloaded) / float64(total) * 100
if len(info.FollowedBy) != 0 {
diff --git a/internal/offline_download/http/client.go b/internal/offline_download/http/client.go
index 6f22fcf7..9b83400e 100644
--- a/internal/offline_download/http/client.go
+++ b/internal/offline_download/http/client.go
@@ -83,6 +83,7 @@ func (s SimpleHttp) Run(task *tool.DownloadTask) error {
}
defer file.Close()
fileSize := resp.ContentLength
+ task.SetTotalBytes(fileSize)
err = utils.CopyWithCtx(task.Ctx(), file, resp.Body, fileSize, task.SetProgress)
return err
}
diff --git a/internal/offline_download/pikpak/pikpak.go b/internal/offline_download/pikpak/pikpak.go
index 618b1442..8fdfb340 100644
--- a/internal/offline_download/pikpak/pikpak.go
+++ b/internal/offline_download/pikpak/pikpak.go
@@ -3,6 +3,9 @@ package pikpak
import (
"context"
"fmt"
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/setting"
+ "strconv"
"github.com/alist-org/alist/v3/drivers/pikpak"
"github.com/alist-org/alist/v3/internal/errs"
@@ -16,7 +19,7 @@ type PikPak struct {
}
func (p *PikPak) Name() string {
- return "pikpak"
+ return "PikPak"
}
func (p *PikPak) Items() []model.SettingItem {
@@ -33,13 +36,23 @@ func (p *PikPak) Init() (string, error) {
}
func (p *PikPak) IsReady() bool {
+ tempDir := setting.GetStr(conf.PikPakTempDir)
+ if tempDir == "" {
+ return false
+ }
+ storage, _, err := op.GetStorageAndActualPath(tempDir)
+ if err != nil {
+ return false
+ }
+ if _, ok := storage.(*pikpak.PikPak); !ok {
+ return false
+ }
return true
}
func (p *PikPak) AddURL(args *tool.AddUrlArgs) (string, error) {
// 添加新任务刷新缓存
p.refreshTaskCache = true
- // args.TempDir 已经被修改为了 DstDirPath
storage, actualPath, err := op.GetStorageAndActualPath(args.TempDir)
if err != nil {
return "", err
@@ -50,6 +63,11 @@ func (p *PikPak) AddURL(args *tool.AddUrlArgs) (string, error) {
}
ctx := context.Background()
+
+ if err := op.MakeDir(ctx, storage, actualPath); err != nil {
+ return "", err
+ }
+
parentDir, err := op.GetUnwrap(ctx, storage, actualPath)
if err != nil {
return "", err
@@ -64,7 +82,7 @@ func (p *PikPak) AddURL(args *tool.AddUrlArgs) (string, error) {
}
func (p *PikPak) Remove(task *tool.DownloadTask) error {
- storage, _, err := op.GetStorageAndActualPath(task.DstDirPath)
+ storage, _, err := op.GetStorageAndActualPath(task.TempDir)
if err != nil {
return err
}
@@ -81,7 +99,7 @@ func (p *PikPak) Remove(task *tool.DownloadTask) error {
}
func (p *PikPak) Status(task *tool.DownloadTask) (*tool.Status, error) {
- storage, _, err := op.GetStorageAndActualPath(task.DstDirPath)
+ storage, _, err := op.GetStorageAndActualPath(task.TempDir)
if err != nil {
return nil, err
}
@@ -105,6 +123,10 @@ func (p *PikPak) Status(task *tool.DownloadTask) (*tool.Status, error) {
s.Progress = float64(t.Progress)
s.Status = t.Message
s.Completed = (t.Phase == "PHASE_TYPE_COMPLETE")
+ s.TotalBytes, err = strconv.ParseInt(t.FileSize, 10, 64)
+ if err != nil {
+ s.TotalBytes = 0
+ }
if t.Phase == "PHASE_TYPE_ERROR" {
s.Err = fmt.Errorf(t.Message)
}
diff --git a/internal/offline_download/qbit/qbit.go b/internal/offline_download/qbit/qbit.go
index 807ebfef..458de03f 100644
--- a/internal/offline_download/qbit/qbit.go
+++ b/internal/offline_download/qbit/qbit.go
@@ -64,6 +64,7 @@ func (a *QBittorrent) Status(task *tool.DownloadTask) (*tool.Status, error) {
return nil, err
}
s := &tool.Status{}
+ s.TotalBytes = info.Size
s.Progress = float64(info.Completed) / float64(info.Size) * 100
switch info.State {
case qbittorrent.UPLOADING, qbittorrent.PAUSEDUP, qbittorrent.QUEUEDUP, qbittorrent.STALLEDUP, qbittorrent.FORCEDUP, qbittorrent.CHECKINGUP:
diff --git a/internal/offline_download/thunder/thunder.go b/internal/offline_download/thunder/thunder.go
new file mode 100644
index 00000000..81b94861
--- /dev/null
+++ b/internal/offline_download/thunder/thunder.go
@@ -0,0 +1,143 @@
+package thunder
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/setting"
+ "strconv"
+
+ "github.com/alist-org/alist/v3/drivers/thunder"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/offline_download/tool"
+ "github.com/alist-org/alist/v3/internal/op"
+)
+
+type Thunder struct {
+ refreshTaskCache bool
+}
+
+func (t *Thunder) Name() string {
+ return "Thunder"
+}
+
+func (t *Thunder) Items() []model.SettingItem {
+ return nil
+}
+
+func (t *Thunder) Run(task *tool.DownloadTask) error {
+ return errs.NotSupport
+}
+
+func (t *Thunder) Init() (string, error) {
+ t.refreshTaskCache = false
+ return "ok", nil
+}
+
+func (t *Thunder) IsReady() bool {
+ tempDir := setting.GetStr(conf.ThunderTempDir)
+ if tempDir == "" {
+ return false
+ }
+ storage, _, err := op.GetStorageAndActualPath(tempDir)
+ if err != nil {
+ return false
+ }
+ if _, ok := storage.(*thunder.Thunder); !ok {
+ return false
+ }
+ return true
+}
+
+func (t *Thunder) AddURL(args *tool.AddUrlArgs) (string, error) {
+ // 添加新任务刷新缓存
+ t.refreshTaskCache = true
+ storage, actualPath, err := op.GetStorageAndActualPath(args.TempDir)
+ if err != nil {
+ return "", err
+ }
+ thunderDriver, ok := storage.(*thunder.Thunder)
+ if !ok {
+ return "", fmt.Errorf("unsupported storage driver for offline download, only Thunder is supported")
+ }
+
+ ctx := context.Background()
+
+ if err := op.MakeDir(ctx, storage, actualPath); err != nil {
+ return "", err
+ }
+
+ parentDir, err := op.GetUnwrap(ctx, storage, actualPath)
+ if err != nil {
+ return "", err
+ }
+
+ task, err := thunderDriver.OfflineDownload(ctx, args.Url, parentDir, "")
+ if err != nil {
+ return "", fmt.Errorf("failed to add offline download task: %w", err)
+ }
+
+ return task.ID, nil
+}
+
+func (t *Thunder) Remove(task *tool.DownloadTask) error {
+ storage, _, err := op.GetStorageAndActualPath(task.TempDir)
+ if err != nil {
+ return err
+ }
+ thunderDriver, ok := storage.(*thunder.Thunder)
+ if !ok {
+ return fmt.Errorf("unsupported storage driver for offline download, only Thunder is supported")
+ }
+ ctx := context.Background()
+ err = thunderDriver.DeleteOfflineTasks(ctx, []string{task.GID}, false)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *Thunder) Status(task *tool.DownloadTask) (*tool.Status, error) {
+ storage, _, err := op.GetStorageAndActualPath(task.TempDir)
+ if err != nil {
+ return nil, err
+ }
+ thunderDriver, ok := storage.(*thunder.Thunder)
+ if !ok {
+ return nil, fmt.Errorf("unsupported storage driver for offline download, only Thunder is supported")
+ }
+ tasks, err := t.GetTasks(thunderDriver)
+ if err != nil {
+ return nil, err
+ }
+ s := &tool.Status{
+ Progress: 0,
+ NewGID: "",
+ Completed: false,
+ Status: "the task has been deleted",
+ Err: nil,
+ }
+ for _, t := range tasks {
+ if t.ID == task.GID {
+ s.Progress = float64(t.Progress)
+ s.Status = t.Message
+ s.Completed = (t.Phase == "PHASE_TYPE_COMPLETE")
+ s.TotalBytes, err = strconv.ParseInt(t.FileSize, 10, 64)
+ if err != nil {
+ s.TotalBytes = 0
+ }
+ if t.Phase == "PHASE_TYPE_ERROR" {
+ s.Err = errors.New(t.Message)
+ }
+ return s, nil
+ }
+ }
+ s.Err = fmt.Errorf("the task has been deleted")
+ return s, nil
+}
+
+func init() {
+ tool.Tools.Add(&Thunder{})
+}
diff --git a/internal/offline_download/thunder/util.go b/internal/offline_download/thunder/util.go
new file mode 100644
index 00000000..ea400f32
--- /dev/null
+++ b/internal/offline_download/thunder/util.go
@@ -0,0 +1,42 @@
+package thunder
+
+import (
+ "context"
+ "time"
+
+ "github.com/Xhofe/go-cache"
+ "github.com/alist-org/alist/v3/drivers/thunder"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/pkg/singleflight"
+)
+
+var taskCache = cache.NewMemCache(cache.WithShards[[]thunder.OfflineTask](16))
+var taskG singleflight.Group[[]thunder.OfflineTask]
+
+func (t *Thunder) GetTasks(thunderDriver *thunder.Thunder) ([]thunder.OfflineTask, error) {
+ key := op.Key(thunderDriver, "/drive/v1/task")
+ if !t.refreshTaskCache {
+ if tasks, ok := taskCache.Get(key); ok {
+ return tasks, nil
+ }
+ }
+ t.refreshTaskCache = false
+ tasks, err, _ := taskG.Do(key, func() ([]thunder.OfflineTask, error) {
+ ctx := context.Background()
+ tasks, err := thunderDriver.OfflineList(ctx, "")
+ if err != nil {
+ return nil, err
+ }
+ // 添加缓存 10s
+ if len(tasks) > 0 {
+ taskCache.Set(key, tasks, cache.WithEx[[]thunder.OfflineTask](time.Second*10))
+ } else {
+ taskCache.Del(key)
+ }
+ return tasks, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return tasks, nil
+}
diff --git a/internal/offline_download/tool/add.go b/internal/offline_download/tool/add.go
index c7c5c781..d64e43e8 100644
--- a/internal/offline_download/tool/add.go
+++ b/internal/offline_download/tool/add.go
@@ -2,14 +2,22 @@ package tool
import (
"context"
+ "net/url"
+ stdpath "path"
"path/filepath"
+ _115 "github.com/alist-org/alist/v3/drivers/115"
+ "github.com/alist-org/alist/v3/drivers/pikpak"
+ "github.com/alist-org/alist/v3/drivers/thunder"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/fs"
+ "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/setting"
+ "github.com/alist-org/alist/v3/internal/task"
"github.com/google/uuid"
"github.com/pkg/errors"
- "github.com/xhofe/tache"
)
type DeletePolicy string
@@ -28,19 +36,7 @@ type AddURLArgs struct {
DeletePolicy DeletePolicy
}
-func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) {
- // get tool
- tool, err := Tools.Get(args.Tool)
- if err != nil {
- return nil, errors.Wrapf(err, "failed get tool")
- }
- // check tool is ready
- if !tool.IsReady() {
- // try to init tool
- if _, err := tool.Init(); err != nil {
- return nil, errors.Wrapf(err, "failed init tool %s", args.Tool)
- }
- }
+func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, error) {
// check storage
storage, dstDirActualPath, err := op.GetStorageAndActualPath(args.DstDirPath)
if err != nil {
@@ -62,23 +58,58 @@ func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) {
return nil, errors.WithStack(errs.NotFolder)
}
}
+ // try putting url
+ if args.Tool == "SimpleHttp" {
+ err = tryPutUrl(ctx, args.DstDirPath, args.URL)
+ if err == nil || !errors.Is(err, errs.NotImplement) {
+ return nil, err
+ }
+ }
+
+ // get tool
+ tool, err := Tools.Get(args.Tool)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed get tool")
+ }
+ // check tool is ready
+ if !tool.IsReady() {
+ // try to init tool
+ if _, err := tool.Init(); err != nil {
+ return nil, errors.Wrapf(err, "failed init tool %s", args.Tool)
+ }
+ }
uid := uuid.NewString()
tempDir := filepath.Join(conf.Conf.TempDir, args.Tool, uid)
deletePolicy := args.DeletePolicy
+ // 如果当前 storage 是对应网盘,则直接下载到目标路径,无需转存
switch args.Tool {
case "115 Cloud":
- tempDir = args.DstDirPath
- // 防止将下载好的文件删除
- deletePolicy = DeleteNever
- case "pikpak":
- tempDir = args.DstDirPath
- // 防止将下载好的文件删除
- deletePolicy = DeleteNever
+ if _, ok := storage.(*_115.Pan115); ok {
+ tempDir = args.DstDirPath
+ } else {
+ tempDir = filepath.Join(setting.GetStr(conf.Pan115TempDir), uid)
+ }
+ case "PikPak":
+ if _, ok := storage.(*pikpak.PikPak); ok {
+ tempDir = args.DstDirPath
+ } else {
+ tempDir = filepath.Join(setting.GetStr(conf.PikPakTempDir), uid)
+ }
+ case "Thunder":
+ if _, ok := storage.(*thunder.Thunder); ok {
+ tempDir = args.DstDirPath
+ } else {
+ tempDir = filepath.Join(setting.GetStr(conf.ThunderTempDir), uid)
+ }
}
-
+
+ taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed
t := &DownloadTask{
+ TaskExtension: task.TaskExtension{
+ Creator: taskCreator,
+ },
Url: args.URL,
DstDirPath: args.DstDirPath,
TempDir: tempDir,
@@ -89,3 +120,14 @@ func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) {
DownloadTaskManager.Add(t)
return t, nil
}
+
+func tryPutUrl(ctx context.Context, path, urlStr string) error {
+ var dstName string
+ u, err := url.Parse(urlStr)
+ if err == nil {
+ dstName = stdpath.Base(u.Path)
+ } else {
+ dstName = "UnnamedURL"
+ }
+ return fs.PutURL(ctx, path, dstName, urlStr)
+}
diff --git a/internal/offline_download/tool/all_test.go b/internal/offline_download/tool/all_test.go
deleted file mode 100644
index 27da5e32..00000000
--- a/internal/offline_download/tool/all_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package tool_test
-
-import (
- "testing"
-
- "github.com/alist-org/alist/v3/internal/offline_download/tool"
-)
-
-func TestGetFiles(t *testing.T) {
- files, err := tool.GetFiles("..")
- if err != nil {
- t.Fatal(err)
- }
- for _, file := range files {
- t.Log(file.Name, file.Size, file.Path, file.Modified)
- }
-}
diff --git a/internal/offline_download/tool/base.go b/internal/offline_download/tool/base.go
index 3b9fb07a..b14169f8 100644
--- a/internal/offline_download/tool/base.go
+++ b/internal/offline_download/tool/base.go
@@ -1,10 +1,6 @@
package tool
import (
- "io"
- "os"
- "time"
-
"github.com/alist-org/alist/v3/internal/model"
)
@@ -16,11 +12,12 @@ type AddUrlArgs struct {
}
type Status struct {
- Progress float64
- NewGID string
- Completed bool
- Status string
- Err error
+ TotalBytes int64
+ Progress float64
+ NewGID string
+ Completed bool
+ Status string
+ Err error
}
type Tool interface {
@@ -39,28 +36,3 @@ type Tool interface {
// Run for simple http download
Run(task *DownloadTask) error
}
-
-type GetFileser interface {
- // GetFiles return the files of the download task, if nil, means walk the temp dir to get the files
- GetFiles(task *DownloadTask) []File
-}
-
-type File struct {
- // ReadCloser for http client
- ReadCloser io.ReadCloser
- Name string
- Size int64
- Path string
- Modified time.Time
-}
-
-func (f *File) GetReadCloser() (io.ReadCloser, error) {
- if f.ReadCloser != nil {
- return f.ReadCloser, nil
- }
- file, err := os.Open(f.Path)
- if err != nil {
- return nil, err
- }
- return file, nil
-}
diff --git a/internal/offline_download/tool/download.go b/internal/offline_download/tool/download.go
index 4cc86a26..42b2dbfb 100644
--- a/internal/offline_download/tool/download.go
+++ b/internal/offline_download/tool/download.go
@@ -7,13 +7,14 @@ import (
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/setting"
+ "github.com/alist-org/alist/v3/internal/task"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/xhofe/tache"
)
type DownloadTask struct {
- tache.Base
+ task.TaskExtension
Url string `json:"url"`
DstDirPath string `json:"dst_dir_path"`
TempDir string `json:"temp_dir"`
@@ -27,6 +28,10 @@ type DownloadTask struct {
}
func (t *DownloadTask) Run() error {
+ t.ReinitCtx()
+ t.ClearEndTime()
+ t.SetStartTime(time.Now())
+ defer func() { t.SetEndTime(time.Now()) }()
if t.tool == nil {
tool, err := Tools.Get(t.Toolname)
if err != nil {
@@ -36,7 +41,7 @@ func (t *DownloadTask) Run() error {
}
if err := t.tool.Run(t); !errs.IsNotSupportError(err) {
if err == nil {
- return t.Complete()
+ return t.Transfer()
}
return err
}
@@ -76,7 +81,10 @@ outer:
if err != nil {
return err
}
- if t.tool.Name() == "pikpak" {
+ if t.tool.Name() == "PikPak" {
+ return nil
+ }
+ if t.tool.Name() == "Thunder" {
return nil
}
if t.tool.Name() == "115 Cloud" {
@@ -101,6 +109,19 @@ outer:
}
}
}
+
+ if t.tool.Name() == "Transmission" {
+ // hack for transmission
+ seedTime := setting.GetInt(conf.TransmissionSeedtime, 0)
+ if seedTime >= 0 {
+ t.Status = "offline download completed, waiting for seeding"
+ <-time.After(time.Minute * time.Duration(seedTime))
+ err := t.tool.Remove(t)
+ if err != nil {
+ log.Errorln(err.Error())
+ }
+ }
+ }
return nil
}
@@ -117,6 +138,7 @@ func (t *DownloadTask) Update() (bool, error) {
}
t.callStatusRetried = 0
t.SetProgress(info.Progress)
+ t.SetTotalBytes(info.TotalBytes)
t.Status = fmt.Sprintf("[%s]: %s", t.tool.Name(), info.Status)
if info.NewGID != "" {
log.Debugf("followen by: %+v", info.NewGID)
@@ -125,7 +147,7 @@ func (t *DownloadTask) Update() (bool, error) {
}
// if download completed
if info.Completed {
- err := t.Complete()
+ err := t.Transfer()
return true, errors.WithMessage(err, "failed to transfer file")
}
// if download failed
@@ -135,37 +157,16 @@ func (t *DownloadTask) Update() (bool, error) {
return false, nil
}
-func (t *DownloadTask) Complete() error {
- var (
- files []File
- err error
- )
- if t.tool.Name() == "pikpak" {
- return nil
- }
- if t.tool.Name() == "115 Cloud" {
- return nil
- }
- if getFileser, ok := t.tool.(GetFileser); ok {
- files = getFileser.GetFiles(t)
- } else {
- files, err = GetFiles(t.TempDir)
- if err != nil {
- return errors.Wrapf(err, "failed to get files")
+func (t *DownloadTask) Transfer() error {
+ toolName := t.tool.Name()
+ if toolName == "115 Cloud" || toolName == "PikPak" || toolName == "Thunder" {
+ // 如果不是直接下载到目标路径,则进行转存
+ if t.TempDir != t.DstDirPath {
+ return transferObj(t.Ctx(), t.TempDir, t.DstDirPath, t.DeletePolicy)
}
+ return nil
}
- // upload files
- for i := range files {
- file := files[i]
- TransferTaskManager.Add(&TransferTask{
- file: file,
- DstDirPath: t.DstDirPath,
- TempDir: t.TempDir,
- DeletePolicy: t.DeletePolicy,
- FileDir: file.Path,
- })
- }
- return nil
+ return transferStd(t.Ctx(), t.TempDir, t.DstDirPath, t.DeletePolicy)
}
func (t *DownloadTask) GetName() string {
diff --git a/internal/offline_download/tool/tools.go b/internal/offline_download/tool/tools.go
index 9de7d526..4a31ac7f 100644
--- a/internal/offline_download/tool/tools.go
+++ b/internal/offline_download/tool/tools.go
@@ -3,6 +3,7 @@ package tool
import (
"fmt"
"github.com/alist-org/alist/v3/internal/model"
+ "sort"
)
var (
@@ -25,8 +26,11 @@ func (t ToolsManager) Add(tool Tool) {
func (t ToolsManager) Names() []string {
names := make([]string, 0, len(t))
for name := range t {
- names = append(names, name)
+ if tool, err := t.Get(name); err == nil && tool.IsReady() {
+ names = append(names, name)
+ }
}
+ sort.Strings(names)
return names
}
diff --git a/internal/offline_download/tool/transfer.go b/internal/offline_download/tool/transfer.go
index 3744c7b5..1d5ece61 100644
--- a/internal/offline_download/tool/transfer.go
+++ b/internal/offline_download/tool/transfer.go
@@ -1,88 +1,72 @@
package tool
import (
+ "context"
"fmt"
- "os"
- "path/filepath"
-
+ "github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/stream"
+ "github.com/alist-org/alist/v3/internal/task"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/xhofe/tache"
+ "net/http"
+ "os"
+ stdpath "path"
+ "path/filepath"
+ "time"
)
type TransferTask struct {
- tache.Base
- FileDir string `json:"file_dir"`
- DstDirPath string `json:"dst_dir_path"`
- TempDir string `json:"temp_dir"`
- DeletePolicy DeletePolicy `json:"delete_policy"`
- file File
+ task.TaskExtension
+ Status string `json:"-"` //don't save status to save space
+ SrcObjPath string `json:"src_obj_path"`
+ DstDirPath string `json:"dst_dir_path"`
+ SrcStorage driver.Driver `json:"-"`
+ DstStorage driver.Driver `json:"-"`
+ SrcStorageMp string `json:"src_storage_mp"`
+ DstStorageMp string `json:"dst_storage_mp"`
+ DeletePolicy DeletePolicy `json:"delete_policy"`
}
func (t *TransferTask) Run() error {
- // check dstDir again
- var err error
- if (t.file == File{}) {
- t.file, err = GetFile(t.FileDir)
- if err != nil {
- return errors.Wrapf(err, "failed to get file %s", t.FileDir)
- }
+ t.ReinitCtx()
+ t.ClearEndTime()
+ t.SetStartTime(time.Now())
+ defer func() { t.SetEndTime(time.Now()) }()
+ if t.SrcStorage == nil {
+ return transferStdPath(t)
+ } else {
+ return transferObjPath(t)
}
- storage, dstDirActualPath, err := op.GetStorageAndActualPath(t.DstDirPath)
- if err != nil {
- return errors.WithMessage(err, "failed get storage")
- }
- mimetype := utils.GetMimeType(t.file.Path)
- rc, err := t.file.GetReadCloser()
- if err != nil {
- return errors.Wrapf(err, "failed to open file %s", t.file.Path)
- }
- s := &stream.FileStream{
- Ctx: nil,
- Obj: &model.Object{
- Name: filepath.Base(t.file.Path),
- Size: t.file.Size,
- Modified: t.file.Modified,
- IsFolder: false,
- },
- Reader: rc,
- Mimetype: mimetype,
- Closers: utils.NewClosers(rc),
- }
- relDir, err := filepath.Rel(t.TempDir, filepath.Dir(t.file.Path))
- if err != nil {
- log.Errorf("find relation directory error: %v", err)
- }
- newDistDir := filepath.Join(dstDirActualPath, relDir)
- return op.Put(t.Ctx(), storage, newDistDir, s, t.SetProgress)
}
func (t *TransferTask) GetName() string {
- return fmt.Sprintf("transfer %s to [%s]", t.file.Path, t.DstDirPath)
+ return fmt.Sprintf("transfer [%s](%s) to [%s](%s)", t.SrcStorageMp, t.SrcObjPath, t.DstStorageMp, t.DstDirPath)
}
func (t *TransferTask) GetStatus() string {
- return "transferring"
+ return t.Status
}
func (t *TransferTask) OnSucceeded() {
if t.DeletePolicy == DeleteOnUploadSucceed || t.DeletePolicy == DeleteAlways {
- err := os.Remove(t.file.Path)
- if err != nil {
- log.Errorf("failed to delete file %s, error: %s", t.file.Path, err.Error())
+ if t.SrcStorage == nil {
+ removeStdTemp(t)
+ } else {
+ removeObjTemp(t)
}
}
}
func (t *TransferTask) OnFailed() {
if t.DeletePolicy == DeleteOnUploadFailed || t.DeletePolicy == DeleteAlways {
- err := os.Remove(t.file.Path)
- if err != nil {
- log.Errorf("failed to delete file %s, error: %s", t.file.Path, err.Error())
+ if t.SrcStorage == nil {
+ removeStdTemp(t)
+ } else {
+ removeObjTemp(t)
}
}
}
@@ -90,3 +74,202 @@ func (t *TransferTask) OnFailed() {
var (
TransferTaskManager *tache.Manager[*TransferTask]
)
+
+func transferStd(ctx context.Context, tempDir, dstDirPath string, deletePolicy DeletePolicy) error {
+ dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
+ if err != nil {
+ return errors.WithMessage(err, "failed get dst storage")
+ }
+ entries, err := os.ReadDir(tempDir)
+ if err != nil {
+ return err
+ }
+ taskCreator, _ := ctx.Value("user").(*model.User)
+ for _, entry := range entries {
+ t := &TransferTask{
+ TaskExtension: task.TaskExtension{
+ Creator: taskCreator,
+ },
+ SrcObjPath: stdpath.Join(tempDir, entry.Name()),
+ DstDirPath: dstDirActualPath,
+ DstStorage: dstStorage,
+ DstStorageMp: dstStorage.GetStorage().MountPath,
+ DeletePolicy: deletePolicy,
+ }
+ TransferTaskManager.Add(t)
+ }
+ return nil
+}
+
+func transferStdPath(t *TransferTask) error {
+ t.Status = "getting src object"
+ info, err := os.Stat(t.SrcObjPath)
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ t.Status = "src object is dir, listing objs"
+ entries, err := os.ReadDir(t.SrcObjPath)
+ if err != nil {
+ return err
+ }
+ for _, entry := range entries {
+ srcRawPath := stdpath.Join(t.SrcObjPath, entry.Name())
+ dstObjPath := stdpath.Join(t.DstDirPath, info.Name())
+ t := &TransferTask{
+ TaskExtension: task.TaskExtension{
+ Creator: t.Creator,
+ },
+ SrcObjPath: srcRawPath,
+ DstDirPath: dstObjPath,
+ DstStorage: t.DstStorage,
+ SrcStorageMp: t.SrcStorageMp,
+ DstStorageMp: t.DstStorageMp,
+ DeletePolicy: t.DeletePolicy,
+ }
+ TransferTaskManager.Add(t)
+ }
+ t.Status = "src object is dir, added all transfer tasks of files"
+ return nil
+ }
+ return transferStdFile(t)
+}
+
+func transferStdFile(t *TransferTask) error {
+ rc, err := os.Open(t.SrcObjPath)
+ if err != nil {
+ return errors.Wrapf(err, "failed to open file %s", t.SrcObjPath)
+ }
+ info, err := rc.Stat()
+ if err != nil {
+ return errors.Wrapf(err, "failed to get file %s", t.SrcObjPath)
+ }
+ mimetype := utils.GetMimeType(t.SrcObjPath)
+ s := &stream.FileStream{
+ Ctx: nil,
+ Obj: &model.Object{
+ Name: filepath.Base(t.SrcObjPath),
+ Size: info.Size(),
+ Modified: info.ModTime(),
+ IsFolder: false,
+ },
+ Reader: rc,
+ Mimetype: mimetype,
+ Closers: utils.NewClosers(rc),
+ }
+ t.SetTotalBytes(info.Size())
+ return op.Put(t.Ctx(), t.DstStorage, t.DstDirPath, s, t.SetProgress)
+}
+
+func removeStdTemp(t *TransferTask) {
+ info, err := os.Stat(t.SrcObjPath)
+ if err != nil || info.IsDir() {
+ return
+ }
+ if err := os.Remove(t.SrcObjPath); err != nil {
+ log.Errorf("failed to delete temp file %s, error: %s", t.SrcObjPath, err.Error())
+ }
+}
+
+func transferObj(ctx context.Context, tempDir, dstDirPath string, deletePolicy DeletePolicy) error {
+ srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(tempDir)
+ if err != nil {
+ return errors.WithMessage(err, "failed get src storage")
+ }
+ dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
+ if err != nil {
+ return errors.WithMessage(err, "failed get dst storage")
+ }
+ objs, err := op.List(ctx, srcStorage, srcObjActualPath, model.ListArgs{})
+ if err != nil {
+ return errors.WithMessagef(err, "failed list src [%s] objs", tempDir)
+ }
+ taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed
+ for _, obj := range objs {
+ t := &TransferTask{
+ TaskExtension: task.TaskExtension{
+ Creator: taskCreator,
+ },
+ SrcObjPath: stdpath.Join(srcObjActualPath, obj.GetName()),
+ DstDirPath: dstDirActualPath,
+ SrcStorage: srcStorage,
+ DstStorage: dstStorage,
+ SrcStorageMp: srcStorage.GetStorage().MountPath,
+ DstStorageMp: dstStorage.GetStorage().MountPath,
+ DeletePolicy: deletePolicy,
+ }
+ TransferTaskManager.Add(t)
+ }
+ return nil
+}
+
+func transferObjPath(t *TransferTask) error {
+ t.Status = "getting src object"
+ srcObj, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath)
+ if err != nil {
+ return errors.WithMessagef(err, "failed get src [%s] file", t.SrcObjPath)
+ }
+ if srcObj.IsDir() {
+ t.Status = "src object is dir, listing objs"
+ objs, err := op.List(t.Ctx(), t.SrcStorage, t.SrcObjPath, model.ListArgs{})
+ if err != nil {
+ return errors.WithMessagef(err, "failed list src [%s] objs", t.SrcObjPath)
+ }
+ for _, obj := range objs {
+ if utils.IsCanceled(t.Ctx()) {
+ return nil
+ }
+ srcObjPath := stdpath.Join(t.SrcObjPath, obj.GetName())
+ dstObjPath := stdpath.Join(t.DstDirPath, srcObj.GetName())
+ TransferTaskManager.Add(&TransferTask{
+ TaskExtension: task.TaskExtension{
+ Creator: t.Creator,
+ },
+ SrcObjPath: srcObjPath,
+ DstDirPath: dstObjPath,
+ SrcStorage: t.SrcStorage,
+ DstStorage: t.DstStorage,
+ SrcStorageMp: t.SrcStorageMp,
+ DstStorageMp: t.DstStorageMp,
+ DeletePolicy: t.DeletePolicy,
+ })
+ }
+ t.Status = "src object is dir, added all transfer tasks of objs"
+ return nil
+ }
+ return transferObjFile(t)
+}
+
+func transferObjFile(t *TransferTask) error {
+ srcFile, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath)
+ if err != nil {
+ return errors.WithMessagef(err, "failed get src [%s] file", t.SrcObjPath)
+ }
+ link, _, err := op.Link(t.Ctx(), t.SrcStorage, t.SrcObjPath, model.LinkArgs{
+ Header: http.Header{},
+ })
+ if err != nil {
+ return errors.WithMessagef(err, "failed get [%s] link", t.SrcObjPath)
+ }
+ fs := stream.FileStream{
+ Obj: srcFile,
+ Ctx: t.Ctx(),
+ }
+ // any link provided is seekable
+ ss, err := stream.NewSeekableStream(fs, link)
+ if err != nil {
+ return errors.WithMessagef(err, "failed get [%s] stream", t.SrcObjPath)
+ }
+ t.SetTotalBytes(srcFile.GetSize())
+ return op.Put(t.Ctx(), t.DstStorage, t.DstDirPath, ss, t.SetProgress)
+}
+
+func removeObjTemp(t *TransferTask) {
+ srcObj, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath)
+ if err != nil || srcObj.IsDir() {
+ return
+ }
+ if err := op.Remove(t.Ctx(), t.SrcStorage, t.SrcObjPath); err != nil {
+ log.Errorf("failed to delete temp obj %s, error: %s", t.SrcObjPath, err.Error())
+ }
+}
diff --git a/internal/offline_download/tool/util.go b/internal/offline_download/tool/util.go
deleted file mode 100644
index b2c6ec02..00000000
--- a/internal/offline_download/tool/util.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package tool
-
-import (
- "os"
- "path/filepath"
-)
-
-func GetFiles(dir string) ([]File, error) {
- var files []File
- err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !info.IsDir() {
- files = append(files, File{
- Name: info.Name(),
- Size: info.Size(),
- Path: path,
- Modified: info.ModTime(),
- })
- }
- return nil
- })
- if err != nil {
- return nil, err
- }
- return files, nil
-}
-
-func GetFile(path string) (File, error) {
- info, err := os.Stat(path)
- if err != nil {
- return File{}, err
- }
- return File{
- Name: info.Name(),
- Size: info.Size(),
- Path: path,
- Modified: info.ModTime(),
- }, nil
-}
diff --git a/internal/offline_download/transmission/client.go b/internal/offline_download/transmission/client.go
new file mode 100644
index 00000000..ae136009
--- /dev/null
+++ b/internal/offline_download/transmission/client.go
@@ -0,0 +1,177 @@
+package transmission
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/offline_download/tool"
+ "github.com/alist-org/alist/v3/internal/setting"
+ "github.com/alist-org/alist/v3/pkg/utils"
+ "github.com/hekmon/transmissionrpc/v3"
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+)
+
+type Transmission struct {
+ client *transmissionrpc.Client
+}
+
+func (t *Transmission) Run(task *tool.DownloadTask) error {
+ return errs.NotSupport
+}
+
+func (t *Transmission) Name() string {
+ return "Transmission"
+}
+
+func (t *Transmission) Items() []model.SettingItem {
+ // transmission settings
+ return []model.SettingItem{
+ {Key: conf.TransmissionUri, Value: "http://localhost:9091/transmission/rpc", Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+ {Key: conf.TransmissionSeedtime, Value: "0", Type: conf.TypeNumber, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+ }
+}
+
+func (t *Transmission) Init() (string, error) {
+ t.client = nil
+ uri := setting.GetStr(conf.TransmissionUri)
+ endpoint, err := url.Parse(uri)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to init transmission client")
+ }
+ c, err := transmissionrpc.New(endpoint, nil)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to init transmission client")
+ }
+
+ ok, serverVersion, serverMinimumVersion, err := c.RPCVersion(context.Background())
+ if err != nil {
+ return "", errors.Wrapf(err, "failed get transmission version")
+ }
+
+ if !ok {
+ return "", fmt.Errorf("remote transmission RPC version (v%d) is incompatible with the transmission library (v%d): remote needs at least v%d",
+ serverVersion, transmissionrpc.RPCVersion, serverMinimumVersion)
+ }
+
+ t.client = c
+ log.Infof("remote transmission RPC version (v%d) is compatible with our transmissionrpc library (v%d)\n",
+ serverVersion, transmissionrpc.RPCVersion)
+ log.Infof("using transmission version: %d", serverVersion)
+ return fmt.Sprintf("transmission version: %d", serverVersion), nil
+}
+
+func (t *Transmission) IsReady() bool {
+ return t.client != nil
+}
+
+func (t *Transmission) AddURL(args *tool.AddUrlArgs) (string, error) {
+ endpoint, err := url.Parse(args.Url)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to parse transmission uri")
+ }
+
+ rpcPayload := transmissionrpc.TorrentAddPayload{
+ DownloadDir: &args.TempDir,
+ }
+ // http url for .torrent file
+ if endpoint.Scheme == "http" || endpoint.Scheme == "https" {
+ resp, err := http.Get(args.Url)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to get .torrent file")
+ }
+ defer resp.Body.Close()
+ buffer := new(bytes.Buffer)
+ encoder := base64.NewEncoder(base64.StdEncoding, buffer)
+ // Stream file to the encoder
+ if _, err = utils.CopyWithBuffer(encoder, resp.Body); err != nil {
+ return "", errors.Wrap(err, "can't copy file content into the base64 encoder")
+ }
+ // Flush last bytes
+ if err = encoder.Close(); err != nil {
+ return "", errors.Wrap(err, "can't flush last bytes of the base64 encoder")
+ }
+ // Get the string form
+ b64 := buffer.String()
+ rpcPayload.MetaInfo = &b64
+ } else { // magnet uri
+ rpcPayload.Filename = &args.Url
+ }
+
+ torrent, err := t.client.TorrentAdd(context.TODO(), rpcPayload)
+ if err != nil {
+ return "", err
+ }
+
+ if torrent.ID == nil {
+ return "", fmt.Errorf("failed get torrent ID")
+ }
+ gid := strconv.FormatInt(*torrent.ID, 10)
+ return gid, nil
+}
+
+func (t *Transmission) Remove(task *tool.DownloadTask) error {
+ gid, err := strconv.ParseInt(task.GID, 10, 64)
+ if err != nil {
+ return err
+ }
+ err = t.client.TorrentRemove(context.TODO(), transmissionrpc.TorrentRemovePayload{
+ IDs: []int64{gid},
+ DeleteLocalData: false,
+ })
+ return err
+}
+
+func (t *Transmission) Status(task *tool.DownloadTask) (*tool.Status, error) {
+ gid, err := strconv.ParseInt(task.GID, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ infos, err := t.client.TorrentGetAllFor(context.TODO(), []int64{gid})
+ if err != nil {
+ return nil, err
+ }
+
+ if len(infos) < 1 {
+ return nil, fmt.Errorf("failed get status, wrong gid: %s", task.GID)
+ }
+ info := infos[0]
+
+ s := &tool.Status{
+ Completed: *info.IsFinished,
+ Err: err,
+ }
+ s.Progress = *info.PercentDone * 100
+ s.TotalBytes = int64(*info.SizeWhenDone / 8)
+
+ switch *info.Status {
+ case transmissionrpc.TorrentStatusCheckWait,
+ transmissionrpc.TorrentStatusDownloadWait,
+ transmissionrpc.TorrentStatusCheck,
+ transmissionrpc.TorrentStatusDownload,
+ transmissionrpc.TorrentStatusIsolated:
+ s.Status = "[transmission] " + info.Status.String()
+ case transmissionrpc.TorrentStatusSeedWait,
+ transmissionrpc.TorrentStatusSeed:
+ s.Completed = true
+ case transmissionrpc.TorrentStatusStopped:
+ s.Err = errors.Errorf("[transmission] failed to download %s, status: %s, error: %s", task.GID, info.Status.String(), *info.ErrorString)
+ default:
+ s.Err = errors.Errorf("[transmission] unknown status occurred downloading %s, err: %s", task.GID, *info.ErrorString)
+ }
+ return s, nil
+}
+
+var _ tool.Tool = (*Transmission)(nil)
+
+func init() {
+ tool.Tools.Add(&Transmission{})
+}
diff --git a/internal/op/archive.go b/internal/op/archive.go
new file mode 100644
index 00000000..38b870c7
--- /dev/null
+++ b/internal/op/archive.go
@@ -0,0 +1,518 @@
+package op
+
+import (
+ "context"
+ stderrors "errors"
+ "fmt"
+ "io"
+ stdpath "path"
+ "strings"
+ "time"
+
+ "github.com/alist-org/alist/v3/internal/archive/tool"
+ "github.com/alist-org/alist/v3/internal/stream"
+
+ "github.com/Xhofe/go-cache"
+ "github.com/alist-org/alist/v3/internal/driver"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/pkg/singleflight"
+ "github.com/alist-org/alist/v3/pkg/utils"
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+)
+
+var archiveMetaCache = cache.NewMemCache(cache.WithShards[*model.ArchiveMetaProvider](64))
+var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider]
+
+func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
+ if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+ return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+ }
+ path = utils.FixAndCleanPath(path)
+ key := Key(storage, path)
+ if !args.Refresh {
+ if meta, ok := archiveMetaCache.Get(key); ok {
+ log.Debugf("use cache when get %s archive meta", path)
+ return meta, nil
+ }
+ }
+ fn := func() (*model.ArchiveMetaProvider, error) {
+ _, m, err := getArchiveMeta(ctx, storage, path, args)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get %s archive met: %+v", path, err)
+ }
+ if m.Expiration != nil {
+ archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](*m.Expiration))
+ }
+ return m, nil
+ }
+ if storage.Config().OnlyLocal {
+ meta, err := fn()
+ return meta, err
+ }
+ meta, err, _ := archiveMetaG.Do(key, fn)
+ return meta, err
+}
+
+func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, []*stream.SeekableStream, error) {
+ l, obj, err := Link(ctx, storage, path, args)
+ if err != nil {
+ return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path)
+ }
+ baseName, ext, found := strings.Cut(obj.GetName(), ".")
+ if !found {
+ if l.MFile != nil {
+ _ = l.MFile.Close()
+ }
+ if l.RangeReadCloser != nil {
+ _ = l.RangeReadCloser.Close()
+ }
+ return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
+ }
+ partExt, t, err := tool.GetArchiveTool("." + ext)
+ if err != nil {
+ var e error
+ partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName()))
+ if e != nil {
+ if l.MFile != nil {
+ _ = l.MFile.Close()
+ }
+ if l.RangeReadCloser != nil {
+ _ = l.RangeReadCloser.Close()
+ }
+ return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext)
+ }
+ }
+ ss, err := stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: obj}, l)
+ if err != nil {
+ if l.MFile != nil {
+ _ = l.MFile.Close()
+ }
+ if l.RangeReadCloser != nil {
+ _ = l.RangeReadCloser.Close()
+ }
+ return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
+ }
+ ret := []*stream.SeekableStream{ss}
+ if partExt == nil {
+ return obj, t, ret, nil
+ } else {
+ index := partExt.SecondPartIndex
+ dir := stdpath.Dir(path)
+ for {
+ p := stdpath.Join(dir, baseName+fmt.Sprintf(partExt.PartFileFormat, index))
+ var o model.Obj
+ l, o, err = Link(ctx, storage, p, args)
+ if err != nil {
+ break
+ }
+ ss, err = stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: o}, l)
+ if err != nil {
+ if l.MFile != nil {
+ _ = l.MFile.Close()
+ }
+ if l.RangeReadCloser != nil {
+ _ = l.RangeReadCloser.Close()
+ }
+ for _, s := range ret {
+ _ = s.Close()
+ }
+ return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
+ }
+ ret = append(ret, ss)
+ index++
+ }
+ return obj, t, ret, nil
+ }
+}
+
// getArchiveMeta resolves archive metadata, preferring the driver's native
// ArchiveReader implementation and falling back to downloading the archive
// and parsing it with a registered archive tool.
func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) {
	storageAr, ok := storage.(driver.ArchiveReader)
	if ok {
		obj, err := GetUnwrap(ctx, storage, path)
		if err != nil {
			return nil, nil, errors.WithMessage(err, "failed to get file")
		}
		if obj.IsDir() {
			return nil, nil, errors.WithStack(errs.NotFile)
		}
		meta, err := storageAr.GetArchiveMeta(ctx, obj, args.ArchiveArgs)
		// NotImplement means the driver opted out; fall through to the
		// tool-based path below. Any other result (including errors) is final.
		if !errors.Is(err, errs.NotImplement) {
			archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true}
			if meta != nil && meta.GetTree() != nil {
				archiveMetaProvider.Sort = &storage.GetStorage().Sort
			}
			if !storage.Config().NoCache {
				Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
				archiveMetaProvider.Expiration = &Expiration
			}
			return obj, archiveMetaProvider, err
		}
	}
	obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
	if err != nil {
		return nil, nil, err
	}
	// Close every part stream on the way out, combining close errors.
	defer func() {
		var e error
		for _, s := range ss {
			e = stderrors.Join(e, s.Close())
		}
		if e != nil {
			log.Errorf("failed to close file streamer, %v", e)
		}
	}()
	meta, err := t.GetMeta(ss, args.ArchiveArgs)
	if err != nil {
		return nil, nil, err
	}
	archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: false}
	if meta.GetTree() != nil {
		archiveMetaProvider.Sort = &storage.GetStorage().Sort
	}
	if !storage.Config().NoCache {
		Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
		archiveMetaProvider.Expiration = &Expiration
	} else if ss[0].Link.MFile == nil {
		// e.g. alias / crypt drivers: reuse the underlying link's expiration.
		archiveMetaProvider.Expiration = ss[0].Link.Expiration
	}
	return obj, archiveMetaProvider, err
}
+
+var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
+var archiveListG singleflight.Group[[]model.Obj]
+
// ListArchive lists the entries under args.InnerPath inside the archive at
// path on storage. Results are cached per (storage, path, innerPath) and
// concurrent listings of the same key are deduplicated via singleflight.
func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	path = utils.FixAndCleanPath(path)
	metaKey := Key(storage, path)
	key := stdpath.Join(metaKey, args.InnerPath)
	if !args.Refresh {
		if files, ok := archiveListCache.Get(key); ok {
			log.Debugf("use cache when list archive [%s]%s", path, args.InnerPath)
			return files, nil
		}
		// if meta, ok := archiveMetaCache.Get(metaKey); ok {
		// log.Debugf("use meta cache when list archive [%s]%s", path, args.InnerPath)
		// return getChildrenFromArchiveMeta(meta, args.InnerPath)
		// }
	}
	objs, err, _ := archiveListG.Do(key, func() ([]model.Obj, error) {
		obj, files, err := listArchive(ctx, storage, path, args)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to list archive [%s]%s: %+v", path, args.InnerPath, err)
		}
		// set path on entries that support it and don't have one yet
		for _, f := range files {
			if s, ok := f.(model.SetPath); ok && f.GetPath() == "" && obj.GetPath() != "" {
				s.SetPath(stdpath.Join(obj.GetPath(), args.InnerPath, f.GetName()))
			}
		}
		// wrap obj name
		model.WrapObjsName(files)
		// sort objs
		if storage.Config().LocalSort {
			model.SortFiles(files, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
		}
		model.ExtractFolder(files, storage.GetStorage().ExtractFolder)
		if !storage.Config().NoCache {
			if len(files) > 0 {
				log.Debugf("set cache: %s => %+v", key, files)
				archiveListCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
			} else {
				// Empty listings are not cached; drop any stale entry.
				log.Debugf("del cache: %s", key)
				archiveListCache.Del(key)
			}
		}
		return files, nil
	})
	return objs, err
}
+
+func _listArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, []model.Obj, error) {
+ storageAr, ok := storage.(driver.ArchiveReader)
+ if ok {
+ obj, err := GetUnwrap(ctx, storage, path)
+ if err != nil {
+ return nil, nil, errors.WithMessage(err, "failed to get file")
+ }
+ if obj.IsDir() {
+ return nil, nil, errors.WithStack(errs.NotFile)
+ }
+ files, err := storageAr.ListArchive(ctx, obj, args.ArchiveInnerArgs)
+ if !errors.Is(err, errs.NotImplement) {
+ return obj, files, err
+ }
+ }
+ obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer func() {
+ var e error
+ for _, s := range ss {
+ e = stderrors.Join(e, s.Close())
+ }
+ if e != nil {
+ log.Errorf("failed to close file streamer, %v", e)
+ }
+ }()
+ files, err := t.List(ss, args.ArchiveInnerArgs)
+ return obj, files, err
+}
+
+func listArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, []model.Obj, error) {
+ obj, files, err := _listArchive(ctx, storage, path, args)
+ if errors.Is(err, errs.NotSupport) {
+ var meta model.ArchiveMeta
+ meta, err = GetArchiveMeta(ctx, storage, path, model.ArchiveMetaArgs{
+ ArchiveArgs: args.ArchiveArgs,
+ Refresh: args.Refresh,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+ files, err = getChildrenFromArchiveMeta(meta, args.InnerPath)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ if err == nil && obj == nil {
+ obj, err = GetUnwrap(ctx, storage, path)
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ return obj, files, err
+}
+
+func getChildrenFromArchiveMeta(meta model.ArchiveMeta, innerPath string) ([]model.Obj, error) {
+ obj := meta.GetTree()
+ if obj == nil {
+ return nil, errors.WithStack(errs.NotImplement)
+ }
+ dirs := splitPath(innerPath)
+ for _, dir := range dirs {
+ var next model.ObjTree
+ for _, c := range obj {
+ if c.GetName() == dir {
+ next = c
+ break
+ }
+ }
+ if next == nil {
+ return nil, errors.WithStack(errs.ObjectNotFound)
+ }
+ if !next.IsDir() || next.GetChildren() == nil {
+ return nil, errors.WithStack(errs.NotFolder)
+ }
+ obj = next.GetChildren()
+ }
+ return utils.SliceConvert(obj, func(src model.ObjTree) (model.Obj, error) {
+ return src, nil
+ })
+}
+
// splitPath decomposes a slash-separated inner path into its components,
// e.g. "/a/b/c" -> ["a", "b", "c"]. It mirrors repeated stdpath.Split
// calls, so a path ending in a separator yields no components.
func splitPath(path string) []string {
	// Collect segments right-to-left, then reverse in place.
	var segments []string
	for rest := path; ; {
		dir, name := stdpath.Split(rest)
		if name == "" {
			break
		}
		segments = append(segments, name)
		rest = strings.TrimSuffix(dir, "/")
	}
	for i, j := 0, len(segments)-1; i < j; i, j = i+1, j-1 {
		segments[i], segments[j] = segments[j], segments[i]
	}
	return segments
}
+
// ArchiveGet resolves a single object inside the archive at path. It
// returns both the archive file itself and the inner object addressed by
// args.InnerPath.
func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	path = utils.FixAndCleanPath(path)
	af, err := GetUnwrap(ctx, storage, path)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed to get file")
	}
	if af.IsDir() {
		return nil, nil, errors.WithStack(errs.NotFile)
	}
	// Prefer the driver's native lookup; on error we silently fall back to
	// listing the parent directory below.
	if g, ok := storage.(driver.ArchiveGetter); ok {
		obj, err := g.ArchiveGet(ctx, af, args.ArchiveInnerArgs)
		if err == nil {
			return af, model.WrapObjName(obj), nil
		}
	}

	// The archive root is represented by a synthetic folder object that
	// mirrors the archive file's own attributes.
	if utils.PathEqual(args.InnerPath, "/") {
		return af, &model.ObjWrapName{
			Name: RootName,
			Obj: &model.Object{
				Name: af.GetName(),
				Path: af.GetPath(),
				ID: af.GetID(),
				Size: af.GetSize(),
				Modified: af.ModTime(),
				IsFolder: true,
			},
		}, nil
	}

	// Otherwise list the parent directory inside the archive and pick the
	// entry by name.
	innerDir, name := stdpath.Split(args.InnerPath)
	args.InnerPath = strings.TrimSuffix(innerDir, "/")
	files, err := ListArchive(ctx, storage, path, args)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed get parent list")
	}
	for _, f := range files {
		if f.GetName() == name {
			return af, f, nil
		}
	}
	return nil, nil, errors.WithStack(errs.ObjectNotFound)
}
+
// extractLink pairs a driver-produced download link with the resolved
// inner object it points at.
type extractLink struct {
	Link *model.Link
	Obj model.Obj
}

// extractCache caches extraction links; entries may be keyed per client IP
// when the link is IP-bound.
var extractCache = cache.NewMemCache(cache.WithShards[*extractLink](16))

// extractG deduplicates concurrent extractions of the same key.
var extractG singleflight.Group[*extractLink]
+
// DriverExtract returns a download link for a single file inside an archive
// using the driver's native extraction. Links are cached; IP-bound links
// are cached under a key suffixed with the client IP.
func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	key := stdpath.Join(Key(storage, path), args.InnerPath)
	// Try both the plain key and the IP-suffixed key.
	if link, ok := extractCache.Get(key); ok {
		return link.Link, link.Obj, nil
	} else if link, ok := extractCache.Get(key + ":" + args.IP); ok {
		return link.Link, link.Obj, nil
	}
	fn := func() (*extractLink, error) {
		link, err := driverExtract(ctx, storage, path, args)
		if err != nil {
			return nil, errors.Wrapf(err, "failed extract archive")
		}
		// Only cache links that expire; IPCacheKey mutates the captured key
		// so the cache entry is scoped to this client's IP.
		if link.Link.Expiration != nil {
			if link.Link.IPCacheKey {
				key = key + ":" + args.IP
			}
			extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration))
		}
		return link, nil
	}
	if storage.Config().OnlyLocal {
		// OnlyLocal drivers must run in the calling goroutine; skip singleflight.
		link, err := fn()
		if err != nil {
			return nil, nil, err
		}
		return link.Link, link.Obj, nil
	}
	link, err, _ := extractG.Do(key, fn)
	if err != nil {
		return nil, nil, err
	}
	return link.Link, link.Obj, err
}
+
+func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*extractLink, error) {
+ storageAr, ok := storage.(driver.ArchiveReader)
+ if !ok {
+ return nil, errs.DriverExtractNotSupported
+ }
+ archiveFile, extracted, err := ArchiveGet(ctx, storage, path, model.ArchiveListArgs{
+ ArchiveInnerArgs: args,
+ Refresh: false,
+ })
+ if err != nil {
+ return nil, errors.WithMessage(err, "failed to get file")
+ }
+ if extracted.IsDir() {
+ return nil, errors.WithStack(errs.NotFile)
+ }
+ link, err := storageAr.Extract(ctx, archiveFile, args)
+ return &extractLink{Link: link, Obj: extracted}, err
+}
+
+type streamWithParent struct {
+ rc io.ReadCloser
+ parents []*stream.SeekableStream
+}
+
+func (s *streamWithParent) Read(p []byte) (int, error) {
+ return s.rc.Read(p)
+}
+
+func (s *streamWithParent) Close() error {
+ err := s.rc.Close()
+ for _, ss := range s.parents {
+ err = stderrors.Join(err, ss.Close())
+ }
+ return err
+}
+
// InternalExtract extracts a single entry from the archive at path using a
// registered archive tool and returns a reader for its contents plus its
// size. The returned ReadCloser also closes the underlying part streams.
func InternalExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
	_, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
	if err != nil {
		return nil, 0, err
	}
	rc, size, err := t.Extract(ss, args)
	if err != nil {
		// Extraction failed: release the part streams before returning, and
		// surface any close error alongside the extraction error.
		var e error
		for _, s := range ss {
			e = stderrors.Join(e, s.Close())
		}
		if e != nil {
			log.Errorf("failed to close file streamer, %v", e)
			err = stderrors.Join(err, e)
		}
		return nil, 0, err
	}
	return &streamWithParent{rc: rc, parents: ss}, size, nil
}
+
+func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) error {
+ if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+ return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+ }
+ srcPath = utils.FixAndCleanPath(srcPath)
+ dstDirPath = utils.FixAndCleanPath(dstDirPath)
+ srcObj, err := GetUnwrap(ctx, storage, srcPath)
+ if err != nil {
+ return errors.WithMessage(err, "failed to get src object")
+ }
+ dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
+ if err != nil {
+ return errors.WithMessage(err, "failed to get dst dir")
+ }
+
+ switch s := storage.(type) {
+ case driver.ArchiveDecompressResult:
+ var newObjs []model.Obj
+ newObjs, err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
+ if err == nil {
+ if newObjs != nil && len(newObjs) > 0 {
+ for _, newObj := range newObjs {
+ addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
+ }
+ } else if !utils.IsBool(lazyCache...) {
+ ClearCache(storage, dstDirPath)
+ }
+ }
+ case driver.ArchiveDecompress:
+ err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
+ if err == nil && !utils.IsBool(lazyCache...) {
+ ClearCache(storage, dstDirPath)
+ }
+ default:
+ return errs.NotImplement
+ }
+ return errors.WithStack(err)
+}
diff --git a/internal/op/driver.go b/internal/op/driver.go
index 4f10e8e2..41b6f6d4 100644
--- a/internal/op/driver.go
+++ b/internal/op/driver.go
@@ -133,6 +133,12 @@ func getMainItems(config driver.Config) []driver.Item {
Type: conf.TypeSelect,
Options: "front,back",
})
+ items = append(items, driver.Item{
+ Name: "disable_index",
+ Type: conf.TypeBool,
+ Default: "false",
+ Required: true,
+ })
items = append(items, driver.Item{
Name: "enable_sign",
Type: conf.TypeBool,
diff --git a/internal/op/fs.go b/internal/op/fs.go
index e49c941a..64e99335 100644
--- a/internal/op/fs.go
+++ b/internal/op/fs.go
@@ -3,12 +3,14 @@ package op
import (
"context"
stdpath "path"
+ "slices"
"time"
"github.com/Xhofe/go-cache"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/generic_sync"
"github.com/alist-org/alist/v3/pkg/singleflight"
"github.com/alist-org/alist/v3/pkg/utils"
@@ -25,6 +27,12 @@ func updateCacheObj(storage driver.Driver, path string, oldObj model.Obj, newObj
key := Key(storage, path)
objs, ok := listCache.Get(key)
if ok {
+ for i, obj := range objs {
+ if obj.GetName() == newObj.GetName() {
+ objs = slices.Delete(objs, i, i+1)
+ break
+ }
+ }
for i, obj := range objs {
if obj.GetName() == oldObj.GetName() {
objs[i] = newObj
@@ -510,6 +518,12 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
log.Errorf("failed to close file streamer, %v", err)
}
}()
+ // UrlTree PUT
+ if storage.GetStorage().Driver == "UrlTree" {
+ var link string
+ dstDirPath, link = urlTreeSplitLineFormPath(stdpath.Join(dstDirPath, file.GetName()))
+ file = &stream.FileStream{Obj: &model.Object{Name: link}}
+ }
// if file exist and size = 0, delete it
dstDirPath = utils.FixAndCleanPath(dstDirPath)
dstPath := stdpath.Join(dstDirPath, file.GetName())
@@ -586,3 +600,43 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
}
return errors.WithStack(err)
}
+
+func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url string, lazyCache ...bool) error {
+ if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+ return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+ }
+ dstDirPath = utils.FixAndCleanPath(dstDirPath)
+ _, err := GetUnwrap(ctx, storage, stdpath.Join(dstDirPath, dstName))
+ if err == nil {
+ return errors.New("obj already exists")
+ }
+ err = MakeDir(ctx, storage, dstDirPath)
+ if err != nil {
+ return errors.WithMessagef(err, "failed to put url")
+ }
+ dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
+ if err != nil {
+ return errors.WithMessagef(err, "failed to put url")
+ }
+ switch s := storage.(type) {
+ case driver.PutURLResult:
+ var newObj model.Obj
+ newObj, err = s.PutURL(ctx, dstDir, dstName, url)
+ if err == nil {
+ if newObj != nil {
+ addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
+ } else if !utils.IsBool(lazyCache...) {
+ ClearCache(storage, dstDirPath)
+ }
+ }
+ case driver.PutURL:
+ err = s.PutURL(ctx, dstDir, dstName, url)
+ if err == nil && !utils.IsBool(lazyCache...) {
+ ClearCache(storage, dstDirPath)
+ }
+ default:
+ return errs.NotImplement
+ }
+ log.Debugf("put url [%s](%s) done", dstName, url)
+ return errors.WithStack(err)
+}
diff --git a/internal/op/path.go b/internal/op/path.go
index 27f7e183..912a0000 100644
--- a/internal/op/path.go
+++ b/internal/op/path.go
@@ -2,6 +2,7 @@ package op
import (
"github.com/alist-org/alist/v3/internal/errs"
+ stdpath "path"
"strings"
"github.com/alist-org/alist/v3/internal/driver"
@@ -27,3 +28,30 @@ func GetStorageAndActualPath(rawPath string) (storage driver.Driver, actualPath
actualPath = utils.FixAndCleanPath(strings.TrimPrefix(rawPath, mountPath))
return
}
+
+// urlTreeSplitLineFormPath 分割path中分割真实路径和UrlTree定义字符串
// urlTreeSplitLineFormPath splits a virtual UrlTree path into the real
// directory path pp and the UrlTree definition line file.
//
// Supported forms:
//
//	/url_tree_drive/file_name[:size[:time]]:https://example.com/file
//	/url_tree_drive/https://example.com/file
//	/dir/name (plain path with no embedded URL)
//
// url.PathUnescape collapses "//" into "/", so a single-slash scheme
// ("https:/") is restored to "https://" first. Paths that already contain
// a well-formed scheme are left untouched (the previous unconditional
// replace corrupted them into "https:///...").
func urlTreeSplitLineFormPath(path string) (pp string, file string) {
	if !strings.Contains(path, "https://") {
		path = strings.Replace(path, "https:/", "https://", 1)
	}
	if !strings.Contains(path, "http://") {
		path = strings.Replace(path, "http:/", "http://", 1)
	}
	if strings.Contains(path, ":https:/") || strings.Contains(path, ":http:/") {
		// "name[:size[:time]]:URL" form: everything after the base name's
		// first ':' belongs to the definition line.
		fPath := strings.SplitN(path, ":", 2)[0]
		pp, _ = stdpath.Split(fPath)
		file = path[len(pp):]
	} else if strings.Contains(path, "/https:/") || strings.Contains(path, "/http:/") {
		// bare "URL" form: the definition line starts at the URL itself.
		index := strings.Index(path, "/http://")
		if index == -1 {
			index = strings.Index(path, "/https://")
		}
		pp = path[:index]
		file = path[index+1:]
	} else {
		pp, file = stdpath.Split(path)
	}
	if pp == "" {
		pp = "/"
	}
	return
}
diff --git a/internal/op/setting.go b/internal/op/setting.go
index 83d19c12..36a792b0 100644
--- a/internal/op/setting.go
+++ b/internal/op/setting.go
@@ -26,9 +26,18 @@ var settingGroupCacheF = func(key string, item []model.SettingItem) {
settingGroupCache.Set(key, item, cache.WithEx[[]model.SettingItem](time.Hour))
}
-func settingCacheUpdate() {
+var settingChangingCallbacks = make([]func(), 0)
+
+func RegisterSettingChangingCallback(f func()) {
+ settingChangingCallbacks = append(settingChangingCallbacks, f)
+}
+
+func SettingCacheUpdate() {
settingCache.Clear()
settingGroupCache.Clear()
+ for _, cb := range settingChangingCallbacks {
+ cb()
+ }
}
func GetPublicSettingsMap() map[string]string {
@@ -167,7 +176,7 @@ func SaveSettingItems(items []model.SettingItem) error {
}
}
if len(errs) < len(items)-len(noHookItems)+1 {
- settingCacheUpdate()
+ SettingCacheUpdate()
}
return utils.MergeErrors(errs...)
}
@@ -181,7 +190,7 @@ func SaveSettingItem(item *model.SettingItem) (err error) {
if err = db.SaveSettingItem(item); err != nil {
return err
}
- settingCacheUpdate()
+ SettingCacheUpdate()
return nil
}
@@ -193,6 +202,6 @@ func DeleteSettingItemByKey(key string) error {
if !old.IsDeprecated() {
return errors.Errorf("setting [%s] is not deprecated", key)
}
- settingCacheUpdate()
+ SettingCacheUpdate()
return db.DeleteSettingItemByKey(key)
}
diff --git a/internal/op/sshkey.go b/internal/op/sshkey.go
new file mode 100644
index 00000000..139698e6
--- /dev/null
+++ b/internal/op/sshkey.go
@@ -0,0 +1,47 @@
+package op
+
+import (
+ "github.com/alist-org/alist/v3/internal/db"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/ssh"
+ "time"
+)
+
// CreateSSHPublicKey validates and stores a new SSH public key for a user.
// NOTE(review): the bool appears to be false only when the key string
// itself failed to parse, true for all other outcomes — confirm the
// intended meaning with callers.
func CreateSSHPublicKey(k *model.SSHPublicKey) (error, bool) {
	// Titles must be unique per user: a successful lookup means a duplicate.
	_, err := db.GetSSHPublicKeyByUserTitle(k.UserId, k.Title)
	if err == nil {
		return errors.New("key with the same title already exists"), true
	}
	pubKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k.KeyStr))
	if err != nil {
		return err, false
	}
	k.Fingerprint = ssh.FingerprintSHA256(pubKey)
	k.AddedTime = time.Now()
	k.LastUsedTime = k.AddedTime
	return db.CreateSSHPublicKey(k), true
}
+
// GetSSHPublicKeyByUserId returns a page of the user's SSH public keys and
// the total count.
func GetSSHPublicKeyByUserId(userId uint, pageIndex, pageSize int) (keys []model.SSHPublicKey, count int64, err error) {
	return db.GetSSHPublicKeyByUserId(userId, pageIndex, pageSize)
}
+
+func GetSSHPublicKeyByIdAndUserId(id uint, userId uint) (*model.SSHPublicKey, error) {
+ key, err := db.GetSSHPublicKeyById(id)
+ if err != nil {
+ return nil, err
+ }
+ if key.UserId != userId {
+ return nil, errors.Wrapf(err, "failed get old key")
+ }
+ return key, nil
+}
+
// UpdateSSHPublicKey persists changes to an existing key record.
func UpdateSSHPublicKey(k *model.SSHPublicKey) error {
	return db.UpdateSSHPublicKey(k)
}

// DeleteSSHPublicKeyById removes the key record with the given id.
func DeleteSSHPublicKeyById(keyId uint) error {
	return db.DeleteSSHPublicKeyById(keyId)
}
diff --git a/internal/op/storage.go b/internal/op/storage.go
index 6790a8df..f957f95b 100644
--- a/internal/op/storage.go
+++ b/internal/op/storage.go
@@ -10,6 +10,7 @@ import (
"github.com/alist-org/alist/v3/internal/db"
"github.com/alist-org/alist/v3/internal/driver"
+ "github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/generic_sync"
"github.com/alist-org/alist/v3/pkg/utils"
@@ -101,11 +102,34 @@ func initStorage(ctx context.Context, storage model.Storage, storageDriver drive
log.Errorf("panic init storage: %s", errInfo)
driverStorage.SetStatus(errInfo)
MustSaveDriverStorage(storageDriver)
- storagesMap.Delete(driverStorage.MountPath)
+ storagesMap.Store(driverStorage.MountPath, storageDriver)
}
}()
// Unmarshal Addition
err = utils.Json.UnmarshalFromString(driverStorage.Addition, storageDriver.GetAddition())
+ if err == nil {
+ if ref, ok := storageDriver.(driver.Reference); ok {
+ if strings.HasPrefix(driverStorage.Remark, "ref:/") {
+ refMountPath := driverStorage.Remark
+ i := strings.Index(refMountPath, "\n")
+ if i > 0 {
+ refMountPath = refMountPath[4:i]
+ } else {
+ refMountPath = refMountPath[4:]
+ }
+ var refStorage driver.Driver
+ refStorage, err = GetStorageByMountPath(refMountPath)
+ if err != nil {
+ err = fmt.Errorf("ref: %w", err)
+ } else {
+ err = ref.InitReference(refStorage)
+ if err != nil && errs.IsNotSupportError(err) {
+ err = fmt.Errorf("ref: storage is not %s", storageDriver.Config().Name)
+ }
+ }
+ }
+ }
+ }
if err == nil {
err = storageDriver.Init(ctx)
}
diff --git a/internal/search/build.go b/internal/search/build.go
index 9865b298..2888c1f4 100644
--- a/internal/search/build.go
+++ b/internal/search/build.go
@@ -157,6 +157,11 @@ func BuildIndex(ctx context.Context, indexPaths, ignorePaths []string, maxDepth
return filepath.SkipDir
}
}
+ if storage, _, err := op.GetStorageAndActualPath(indexPath); err == nil {
+ if storage.GetStorage().DisableIndex {
+ return filepath.SkipDir
+ }
+ }
// ignore root
if indexPath == "/" {
return nil
diff --git a/internal/search/util.go b/internal/search/util.go
index 8d03b740..2e6ac8da 100644
--- a/internal/search/util.go
+++ b/internal/search/util.go
@@ -38,7 +38,7 @@ func WriteProgress(progress *model.IndexProgress) {
}
}
-func updateIgnorePaths() {
+func updateIgnorePaths(customIgnorePaths string) {
storages := op.GetAllStorages()
ignorePaths := make([]string, 0)
var skipDrivers = []string{"AList V2", "AList V3", "Virtual"}
@@ -66,7 +66,6 @@ func updateIgnorePaths() {
}
}
}
- customIgnorePaths := setting.GetStr(conf.IgnorePaths)
if customIgnorePaths != "" {
ignorePaths = append(ignorePaths, strings.Split(customIgnorePaths, "\n")...)
}
@@ -84,13 +83,13 @@ func isIgnorePath(path string) bool {
func init() {
op.RegisterSettingItemHook(conf.IgnorePaths, func(item *model.SettingItem) error {
- updateIgnorePaths()
+ updateIgnorePaths(item.Value)
return nil
})
op.RegisterStorageHook(func(typ string, storage driver.Driver) {
var skipDrivers = []string{"AList V2", "AList V3", "Virtual"}
if utils.SliceContains(skipDrivers, storage.Config().Name) {
- updateIgnorePaths()
+ updateIgnorePaths(setting.GetStr(conf.IgnorePaths))
}
})
}
diff --git a/internal/sign/archive.go b/internal/sign/archive.go
new file mode 100644
index 00000000..26a2c208
--- /dev/null
+++ b/internal/sign/archive.go
@@ -0,0 +1,41 @@
+package sign
+
+import (
+ "sync"
+ "time"
+
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/setting"
+ "github.com/alist-org/alist/v3/pkg/sign"
+)
+
// onceArchive guards the lazy construction of instanceArchive.
var onceArchive sync.Once

// instanceArchive is the process-wide HMAC signer for archive links.
var instanceArchive sign.Sign
+
+func SignArchive(data string) string {
+ expire := setting.GetInt(conf.LinkExpiration, 0)
+ if expire == 0 {
+ return NotExpiredArchive(data)
+ } else {
+ return WithDurationArchive(data, time.Duration(expire)*time.Hour)
+ }
+}
+
// WithDurationArchive signs data with an expiry d from now.
func WithDurationArchive(data string, d time.Duration) string {
	onceArchive.Do(InstanceArchive)
	return instanceArchive.Sign(data, time.Now().Add(d).Unix())
}

// NotExpiredArchive signs data with no expiry (expire timestamp 0).
func NotExpiredArchive(data string) string {
	onceArchive.Do(InstanceArchive)
	return instanceArchive.Sign(data, 0)
}

// VerifyArchive checks sign against data using the archive signer.
func VerifyArchive(data string, sign string) error {
	onceArchive.Do(InstanceArchive)
	return instanceArchive.Verify(data, sign)
}

// InstanceArchive builds the archive signer from the site token; it is
// normally invoked exactly once via onceArchive.
func InstanceArchive() {
	instanceArchive = sign.NewHMACSign([]byte(setting.GetStr(conf.Token) + "-archive"))
}
diff --git a/internal/stream/limit.go b/internal/stream/limit.go
new file mode 100644
index 00000000..14d0efd0
--- /dev/null
+++ b/internal/stream/limit.go
@@ -0,0 +1,152 @@
+package stream
+
+import (
+ "context"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/pkg/http_range"
+ "github.com/alist-org/alist/v3/pkg/utils"
+ "golang.org/x/time/rate"
+ "io"
+ "time"
+)
+
// Limiter mirrors the method set of *rate.Limiter so limiters can be
// injected or stubbed; see golang.org/x/time/rate for the semantics of
// each method.
type Limiter interface {
	Limit() rate.Limit
	Burst() int
	TokensAt(time.Time) float64
	Tokens() float64
	Allow() bool
	AllowN(time.Time, int) bool
	Reserve() *rate.Reservation
	ReserveN(time.Time, int) *rate.Reservation
	Wait(context.Context) error
	WaitN(context.Context, int) error
	SetLimit(rate.Limit)
	SetLimitAt(time.Time, rate.Limit)
	SetBurst(int)
	SetBurstAt(time.Time, int)
}
+
// Package-level transfer limiters; nil means the corresponding direction
// is unthrottled. Presumably Client* throttle traffic to/from end clients
// and Server* throttle traffic to/from remote storages — confirm at the
// assignment sites.
var (
	ClientDownloadLimit Limiter
	ClientUploadLimit Limiter
	ServerDownloadLimit Limiter
	ServerUploadLimit Limiter
)
+
+type RateLimitReader struct {
+ io.Reader
+ Limiter Limiter
+ Ctx context.Context
+}
+
+func (r *RateLimitReader) Read(p []byte) (n int, err error) {
+ if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
+ return 0, r.Ctx.Err()
+ }
+ n, err = r.Reader.Read(p)
+ if err != nil {
+ return
+ }
+ if r.Limiter != nil {
+ if r.Ctx == nil {
+ r.Ctx = context.Background()
+ }
+ err = r.Limiter.WaitN(r.Ctx, n)
+ }
+ return
+}
+
+func (r *RateLimitReader) Close() error {
+ if c, ok := r.Reader.(io.Closer); ok {
+ return c.Close()
+ }
+ return nil
+}
+
+type RateLimitWriter struct {
+ io.Writer
+ Limiter Limiter
+ Ctx context.Context
+}
+
+func (w *RateLimitWriter) Write(p []byte) (n int, err error) {
+ if w.Ctx != nil && utils.IsCanceled(w.Ctx) {
+ return 0, w.Ctx.Err()
+ }
+ n, err = w.Writer.Write(p)
+ if err != nil {
+ return
+ }
+ if w.Limiter != nil {
+ if w.Ctx == nil {
+ w.Ctx = context.Background()
+ }
+ err = w.Limiter.WaitN(w.Ctx, n)
+ }
+ return
+}
+
+func (w *RateLimitWriter) Close() error {
+ if c, ok := w.Writer.(io.Closer); ok {
+ return c.Close()
+ }
+ return nil
+}
+
// RateLimitFile wraps a model.File and throttles both sequential and
// random-access reads with Limiter. A nil Limiter disables throttling; a
// nil Ctx defaults to context.Background.
type RateLimitFile struct {
	model.File
	Limiter Limiter
	Ctx context.Context
}

// Read reads from the wrapped file, then waits for n tokens to enforce the
// rate limit.
func (r *RateLimitFile) Read(p []byte) (n int, err error) {
	if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
		return 0, r.Ctx.Err()
	}
	n, err = r.File.Read(p)
	if err != nil {
		return
	}
	if r.Limiter != nil {
		if r.Ctx == nil {
			r.Ctx = context.Background()
		}
		err = r.Limiter.WaitN(r.Ctx, n)
	}
	return
}

// ReadAt reads at offset off from the wrapped file, then waits for n
// tokens to enforce the rate limit.
func (r *RateLimitFile) ReadAt(p []byte, off int64) (n int, err error) {
	if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
		return 0, r.Ctx.Err()
	}
	n, err = r.File.ReadAt(p, off)
	if err != nil {
		return
	}
	if r.Limiter != nil {
		if r.Ctx == nil {
			r.Ctx = context.Background()
		}
		err = r.Limiter.WaitN(r.Ctx, n)
	}
	return
}
+
// RateLimitRangeReadCloser wraps a RangeReadCloserIF so that every range
// reader it hands out is throttled by Limiter.
type RateLimitRangeReadCloser struct {
	model.RangeReadCloserIF
	Limiter Limiter
}

// RangeRead opens the requested range and wraps the resulting reader in a
// RateLimitReader bound to ctx.
func (rrc *RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
	rc, err := rrc.RangeReadCloserIF.RangeRead(ctx, httpRange)
	if err != nil {
		return nil, err
	}
	return &RateLimitReader{
		Reader: rc,
		Limiter: rrc.Limiter,
		Ctx: ctx,
	}, nil
}
diff --git a/internal/stream/stream.go b/internal/stream/stream.go
index 2c9543c1..64160915 100644
--- a/internal/stream/stream.go
+++ b/internal/stream/stream.go
@@ -6,12 +6,15 @@ import (
"errors"
"fmt"
"io"
+ "math"
"os"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
+ "github.com/sirupsen/logrus"
+ "go4.org/readerutil"
)
type FileStream struct {
@@ -60,6 +63,8 @@ func (f *FileStream) Close() error {
err2 = os.RemoveAll(f.tmpFile.Name())
if err2 != nil {
err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", f.tmpFile.Name())
+ } else {
+ f.tmpFile = nil
}
}
@@ -89,7 +94,17 @@ func (f *FileStream) CacheFullInTempFile() (model.File, error) {
f.Add(tmpF)
f.tmpFile = tmpF
f.Reader = tmpF
- return f.tmpFile, nil
+ return tmpF, nil
+}
+
+func (f *FileStream) GetFile() model.File {
+ if f.tmpFile != nil {
+ return f.tmpFile
+ }
+ if file, ok := f.Reader.(model.File); ok {
+ return file
+ }
+ return nil
}
const InMemoryBufMaxSize = 10 // Megabytes
@@ -99,33 +114,39 @@ const InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024
// also support a peeking RangeRead at very start, but won't buffer more than 10MB data in memory
func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
if httpRange.Length == -1 {
- httpRange.Length = f.GetSize()
+ // 参考 internal/net/request.go
+ httpRange.Length = f.GetSize() - httpRange.Start
}
- if f.peekBuff != nil && httpRange.Start < int64(f.peekBuff.Len()) && httpRange.Start+httpRange.Length-1 < int64(f.peekBuff.Len()) {
+ size := httpRange.Start + httpRange.Length
+ if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) {
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
}
- if f.tmpFile == nil {
- if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil {
- bufSize := utils.Min(httpRange.Length, f.GetSize())
- newBuf := bytes.NewBuffer(make([]byte, 0, bufSize))
- n, err := utils.CopyWithBufferN(newBuf, f.Reader, bufSize)
+ var cache io.ReaderAt = f.GetFile()
+ if cache == nil {
+ if size <= InMemoryBufMaxSizeBytes {
+ bufSize := min(size, f.GetSize())
+ // 使用bytes.Buffer作为io.CopyBuffer的写入对象,CopyBuffer会调用Buffer.ReadFrom
+ // 即使被写入的数据量与Buffer.Cap一致,Buffer也会扩大
+ buf := make([]byte, bufSize)
+ n, err := io.ReadFull(f.Reader, buf)
if err != nil {
return nil, err
}
- if n != bufSize {
+ if n != int(bufSize) {
return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect =%d ,actual =%d", bufSize, n)
}
- f.peekBuff = bytes.NewReader(newBuf.Bytes())
+ f.peekBuff = bytes.NewReader(buf)
f.Reader = io.MultiReader(f.peekBuff, f.Reader)
- return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
+ cache = f.peekBuff
} else {
- _, err := f.CacheFullInTempFile()
+ var err error
+ cache, err = f.CacheFullInTempFile()
if err != nil {
return nil, err
}
}
}
- return io.NewSectionReader(f.tmpFile, httpRange.Start, httpRange.Length), nil
+ return io.NewSectionReader(cache, httpRange.Start, httpRange.Length), nil
}
var _ model.FileStreamer = (*SeekableStream)(nil)
@@ -134,6 +155,10 @@ var _ model.FileStreamer = (*FileStream)(nil)
//var _ seekableStream = (*FileStream)(nil)
// for most internal stream, which is either RangeReadCloser or MFile
+// Any functionality implemented based on SeekableStream should implement a Close method,
+// whose only purpose is to close the SeekableStream object. If such functionality has
+// additional resources that need to be closed, they should be added to the Closer property of
+// the SeekableStream object and be closed together when the SeekableStream object is closed.
type SeekableStream struct {
FileStream
Link *model.Link
@@ -146,37 +171,55 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
if len(fs.Mimetype) == 0 {
fs.Mimetype = utils.GetMimeType(fs.Obj.GetName())
}
- ss := SeekableStream{FileStream: fs, Link: link}
+ ss := &SeekableStream{FileStream: fs, Link: link}
if ss.Reader != nil {
result, ok := ss.Reader.(model.File)
if ok {
ss.mFile = result
ss.Closers.Add(result)
- return &ss, nil
+ return ss, nil
}
}
if ss.Link != nil {
if ss.Link.MFile != nil {
- ss.mFile = ss.Link.MFile
- ss.Reader = ss.Link.MFile
- ss.Closers.Add(ss.Link.MFile)
- return &ss, nil
+ mFile := ss.Link.MFile
+ if _, ok := mFile.(*os.File); !ok {
+ mFile = &RateLimitFile{
+ File: mFile,
+ Limiter: ServerDownloadLimit,
+ Ctx: fs.Ctx,
+ }
+ }
+ ss.mFile = mFile
+ ss.Reader = mFile
+ ss.Closers.Add(mFile)
+ return ss, nil
}
-
if ss.Link.RangeReadCloser != nil {
- ss.rangeReadCloser = ss.Link.RangeReadCloser
- return &ss, nil
+ ss.rangeReadCloser = &RateLimitRangeReadCloser{
+ RangeReadCloserIF: ss.Link.RangeReadCloser,
+ Limiter: ServerDownloadLimit,
+ }
+ ss.Add(ss.rangeReadCloser)
+ return ss, nil
}
if len(ss.Link.URL) > 0 {
rrc, err := GetRangeReadCloserFromLink(ss.GetSize(), link)
if err != nil {
return nil, err
}
+ rrc = &RateLimitRangeReadCloser{
+ RangeReadCloserIF: rrc,
+ Limiter: ServerDownloadLimit,
+ }
ss.rangeReadCloser = rrc
- return &ss, nil
+ ss.Add(rrc)
+ return ss, nil
}
}
-
+ if fs.Reader != nil {
+ return ss, nil
+ }
return nil, fmt.Errorf("illegal seekableStream")
}
@@ -187,7 +230,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
// RangeRead is not thread-safe, pls use it in single thread only.
func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
if httpRange.Length == -1 {
- httpRange.Length = ss.GetSize()
+ httpRange.Length = ss.GetSize() - httpRange.Start
}
if ss.mFile != nil {
return io.NewSectionReader(ss.mFile, httpRange.Start, httpRange.Length), nil
@@ -202,7 +245,7 @@ func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, erro
}
return rc, nil
}
- return nil, fmt.Errorf("can't find mFile or rangeReadCloser")
+ return ss.FileStream.RangeRead(httpRange)
}
//func (f *FileStream) GetReader() io.Reader {
@@ -224,8 +267,6 @@ func (ss *SeekableStream) Read(p []byte) (n int, err error) {
return 0, nil
}
ss.Reader = io.NopCloser(rc)
- ss.Closers.Add(rc)
-
}
return ss.Reader.Read(p)
}
@@ -244,10 +285,308 @@ func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
ss.Add(tmpF)
ss.tmpFile = tmpF
ss.Reader = tmpF
- return ss.tmpFile, nil
+ return tmpF, nil
+}
+
+func (ss *SeekableStream) GetFile() model.File {
+ if ss.tmpFile != nil {
+ return ss.tmpFile
+ }
+ if ss.mFile != nil {
+ return ss.mFile
+ }
+ return nil
}
func (f *FileStream) SetTmpFile(r *os.File) {
- f.Reader = r
+ f.Add(r)
f.tmpFile = r
+ f.Reader = r
+}
+
+type ReaderWithSize interface {
+ io.ReadCloser
+ GetSize() int64
+}
+
+type SimpleReaderWithSize struct {
+ io.Reader
+ Size int64
+}
+
+func (r *SimpleReaderWithSize) GetSize() int64 {
+ return r.Size
+}
+
+func (r *SimpleReaderWithSize) Close() error {
+ if c, ok := r.Reader.(io.Closer); ok {
+ return c.Close()
+ }
+ return nil
+}
+
+type ReaderUpdatingProgress struct {
+ Reader ReaderWithSize
+ model.UpdateProgress
+ offset int
+}
+
+func (r *ReaderUpdatingProgress) Read(p []byte) (n int, err error) {
+ n, err = r.Reader.Read(p)
+ r.offset += n
+ r.UpdateProgress(math.Min(100.0, float64(r.offset)/float64(r.Reader.GetSize())*100.0))
+ return n, err
+}
+
+func (r *ReaderUpdatingProgress) Close() error {
+ return r.Reader.Close()
+}
+
+type SStreamReadAtSeeker interface {
+ model.File
+ GetRawStream() *SeekableStream
+}
+
+type readerCur struct {
+ reader io.Reader
+ cur int64
+}
+
+type RangeReadReadAtSeeker struct {
+ ss *SeekableStream
+ masterOff int64
+ readers []*readerCur
+ headCache *headCache
+}
+
+type headCache struct {
+ *readerCur
+ bufs [][]byte
+}
+
+func (c *headCache) read(p []byte) (n int, err error) {
+ pL := len(p)
+ logrus.Debugf("headCache read_%d", pL)
+ if c.cur < int64(pL) {
+ bufL := int64(pL) - c.cur
+ buf := make([]byte, bufL)
+ lr := io.LimitReader(c.reader, bufL)
+ off := 0
+ for c.cur < int64(pL) {
+ n, err = lr.Read(buf[off:])
+ off += n
+ c.cur += int64(n)
+ if err == io.EOF && off == int(bufL) {
+ err = nil
+ }
+ if err != nil {
+ break
+ }
+ }
+ c.bufs = append(c.bufs, buf)
+ }
+ n = 0
+ if c.cur >= int64(pL) {
+ for i := 0; n < pL; i++ {
+ buf := c.bufs[i]
+ r := len(buf)
+ if n+r > pL {
+ r = pL - n
+ }
+ n += copy(p[n:], buf[:r])
+ }
+ }
+ return
+}
+func (r *headCache) Close() error {
+ for i := range r.bufs {
+ r.bufs[i] = nil
+ }
+ r.bufs = nil
+ return nil
+}
+
+func (r *RangeReadReadAtSeeker) InitHeadCache() {
+ if r.ss.Link.MFile == nil && r.masterOff == 0 {
+ reader := r.readers[0]
+ r.readers = r.readers[1:]
+ r.headCache = &headCache{readerCur: reader}
+ r.ss.Closers.Add(r.headCache)
+ }
+}
+
+func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStreamReadAtSeeker, error) {
+ if ss.mFile != nil {
+ _, err := ss.mFile.Seek(offset, io.SeekStart)
+ if err != nil {
+ return nil, err
+ }
+ return &FileReadAtSeeker{ss: ss}, nil
+ }
+ r := &RangeReadReadAtSeeker{
+ ss: ss,
+ masterOff: offset,
+ }
+ if offset != 0 || utils.IsBool(forceRange...) {
+ if offset < 0 || offset > ss.GetSize() {
+ return nil, errors.New("offset out of range")
+ }
+ _, err := r.getReaderAtOffset(offset)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ rc := &readerCur{reader: ss, cur: offset}
+ r.readers = append(r.readers, rc)
+ }
+ return r, nil
+}
+
+func NewMultiReaderAt(ss []*SeekableStream) (readerutil.SizeReaderAt, error) {
+ readers := make([]readerutil.SizeReaderAt, 0, len(ss))
+ for _, s := range ss {
+ ra, err := NewReadAtSeeker(s, 0)
+ if err != nil {
+ return nil, err
+ }
+ readers = append(readers, io.NewSectionReader(ra, 0, s.GetSize()))
+ }
+ return readerutil.NewMultiReaderAt(readers...), nil
+}
+
+func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream {
+ return r.ss
+}
+
+func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error) {
+ var rc *readerCur
+ for _, reader := range r.readers {
+ if reader.cur == -1 {
+ continue
+ }
+ if reader.cur == off {
+ // logrus.Debugf("getReaderAtOffset match_%d", off)
+ return reader, nil
+ }
+ if reader.cur > 0 && off >= reader.cur && (rc == nil || reader.cur < rc.cur) {
+ rc = reader
+ }
+ }
+ if rc != nil && off-rc.cur <= utils.MB {
+ n, err := utils.CopyWithBufferN(io.Discard, rc.reader, off-rc.cur)
+ rc.cur += n
+ if err == io.EOF && rc.cur == off {
+ err = nil
+ }
+ if err == nil {
+ logrus.Debugf("getReaderAtOffset old_%d", off)
+ return rc, nil
+ }
+ rc.cur = -1
+ }
+ logrus.Debugf("getReaderAtOffset new_%d", off)
+
+ // Range请求不能超过文件大小,有些云盘处理不了就会返回整个文件
+ reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: r.ss.GetSize() - off})
+ if err != nil {
+ return nil, err
+ }
+ rc = &readerCur{reader: reader, cur: off}
+ r.readers = append(r.readers, rc)
+ return rc, nil
+}
+
+func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (int, error) {
+ if off == 0 && r.headCache != nil {
+ return r.headCache.read(p)
+ }
+ rc, err := r.getReaderAtOffset(off)
+ if err != nil {
+ return 0, err
+ }
+ n, num := 0, 0
+ for num < len(p) {
+ n, err = rc.reader.Read(p[num:])
+ rc.cur += int64(n)
+ num += n
+ if err == nil {
+ continue
+ }
+ if err == io.EOF {
+ // io.EOF是reader读取完了
+ rc.cur = -1
+ // yeka/zip包 没有处理EOF,我们要兼容
+ // https://github.com/yeka/zip/blob/03d6312748a9d6e0bc0c9a7275385c09f06d9c14/reader.go#L433
+ if num == len(p) {
+ err = nil
+ }
+ }
+ break
+ }
+ return num, err
+}
+
+func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
+ switch whence {
+ case io.SeekStart:
+ case io.SeekCurrent:
+ if offset == 0 {
+ return r.masterOff, nil
+ }
+ offset += r.masterOff
+ case io.SeekEnd:
+ offset += r.ss.GetSize()
+ default:
+ return 0, errs.NotSupport
+ }
+ if offset < 0 {
+ return r.masterOff, errors.New("invalid seek: negative position")
+ }
+ if offset > r.ss.GetSize() {
+ return r.masterOff, io.EOF
+ }
+ r.masterOff = offset
+ return offset, nil
+}
+
+func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
+ if r.masterOff == 0 && r.headCache != nil {
+ return r.headCache.read(p)
+ }
+ rc, err := r.getReaderAtOffset(r.masterOff)
+ if err != nil {
+ return 0, err
+ }
+ n, err = rc.reader.Read(p)
+ rc.cur += int64(n)
+ r.masterOff += int64(n)
+ return n, err
+}
+
+func (r *RangeReadReadAtSeeker) Close() error {
+ return r.ss.Close()
+}
+
+type FileReadAtSeeker struct {
+ ss *SeekableStream
+}
+
+func (f *FileReadAtSeeker) GetRawStream() *SeekableStream {
+ return f.ss
+}
+
+func (f *FileReadAtSeeker) Read(p []byte) (n int, err error) {
+ return f.ss.mFile.Read(p)
+}
+
+func (f *FileReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) {
+ return f.ss.mFile.ReadAt(p, off)
+}
+
+func (f *FileReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
+ return f.ss.mFile.Seek(offset, whence)
+}
+
+func (f *FileReadAtSeeker) Close() error {
+ return f.ss.Close()
}
diff --git a/internal/stream/util.go b/internal/stream/util.go
index 7d2b7ef7..5b935a90 100644
--- a/internal/stream/util.go
+++ b/internal/stream/util.go
@@ -2,14 +2,15 @@ package stream
import (
"context"
+ "encoding/hex"
"fmt"
"io"
"net/http"
- "github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/net"
"github.com/alist-org/alist/v3/pkg/http_range"
+ "github.com/alist-org/alist/v3/pkg/utils"
log "github.com/sirupsen/logrus"
)
@@ -17,10 +18,9 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl
if len(link.URL) == 0 {
return nil, fmt.Errorf("can't create RangeReadCloser since URL is empty in link")
}
- //remoteClosers := utils.EmptyClosers()
rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
if link.Concurrency != 0 || link.PartSize != 0 {
- header := net.ProcessHeader(http.Header{}, link.Header)
+ header := net.ProcessHeader(nil, link.Header)
down := net.NewDownloader(func(d *net.Downloader) {
d.Concurrency = link.Concurrency
d.PartSize = link.PartSize
@@ -32,44 +32,36 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl
HeaderRef: header,
}
rc, err := down.Download(ctx, req)
- if err != nil {
- return nil, errs.NewErr(err, "GetReadCloserFromLink failed")
- }
- return rc, nil
+ return rc, err
}
- if len(link.URL) > 0 {
- response, err := RequestRangedHttp(ctx, link, r.Start, r.Length)
- if err != nil {
- if response == nil {
- return nil, fmt.Errorf("http request failure, err:%s", err)
- }
- return nil, fmt.Errorf("http request failure,status: %d err:%s", response.StatusCode, err)
+ response, err := RequestRangedHttp(ctx, link, r.Start, r.Length)
+ if err != nil {
+ if response == nil {
+ return nil, fmt.Errorf("http request failure, err:%s", err)
}
- if r.Start == 0 && (r.Length == -1 || r.Length == size) || response.StatusCode == http.StatusPartialContent ||
- checkContentRange(&response.Header, r.Start) {
- return response.Body, nil
- } else if response.StatusCode == http.StatusOK {
- log.Warnf("remote http server not supporting range request, expect low perfromace!")
- readCloser, err := net.GetRangedHttpReader(response.Body, r.Start, r.Length)
- if err != nil {
- return nil, err
- }
- return readCloser, nil
-
- }
-
+ return nil, err
+ }
+ if r.Start == 0 && (r.Length == -1 || r.Length == size) || response.StatusCode == http.StatusPartialContent ||
+ checkContentRange(&response.Header, r.Start) {
return response.Body, nil
+ } else if response.StatusCode == http.StatusOK {
+ log.Warnf("remote http server not supporting range request, expect low performance!")
+ readCloser, err := net.GetRangedHttpReader(response.Body, r.Start, r.Length)
+ if err != nil {
+ return nil, err
+ }
+ return readCloser, nil
}
- return nil, errs.NotSupport
+ return response.Body, nil
}
resultRangeReadCloser := model.RangeReadCloser{RangeReader: rangeReaderFunc}
return &resultRangeReadCloser, nil
}
func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int64) (*http.Response, error) {
- header := net.ProcessHeader(http.Header{}, link.Header)
+ header := net.ProcessHeader(nil, link.Header)
header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)
return net.RequestHttp(ctx, "GET", header, link.URL)
@@ -86,3 +78,64 @@ func checkContentRange(header *http.Header, offset int64) bool {
}
return false
}
+
+type ReaderWithCtx struct {
+ io.Reader
+ Ctx context.Context
+}
+
+func (r *ReaderWithCtx) Read(p []byte) (n int, err error) {
+ if utils.IsCanceled(r.Ctx) {
+ return 0, r.Ctx.Err()
+ }
+ return r.Reader.Read(p)
+}
+
+func (r *ReaderWithCtx) Close() error {
+ if c, ok := r.Reader.(io.Closer); ok {
+ return c.Close()
+ }
+ return nil
+}
+
+func CacheFullInTempFileAndUpdateProgress(stream model.FileStreamer, up model.UpdateProgress) (model.File, error) {
+ if cache := stream.GetFile(); cache != nil {
+ up(100)
+ return cache, nil
+ }
+ tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{
+ Reader: stream,
+ UpdateProgress: up,
+ }, stream.GetSize())
+ if err == nil {
+ stream.SetTmpFile(tmpF)
+ }
+ return tmpF, err
+}
+
+func CacheFullInTempFileAndWriter(stream model.FileStreamer, w io.Writer) (model.File, error) {
+ if cache := stream.GetFile(); cache != nil {
+ _, err := cache.Seek(0, io.SeekStart)
+ if err == nil {
+ _, err = utils.CopyWithBuffer(w, cache)
+ if err == nil {
+ _, err = cache.Seek(0, io.SeekStart)
+ }
+ }
+ return cache, err
+ }
+ tmpF, err := utils.CreateTempFile(io.TeeReader(stream, w), stream.GetSize())
+ if err == nil {
+ stream.SetTmpFile(tmpF)
+ }
+ return tmpF, err
+}
+
+func CacheFullInTempFileAndHash(stream model.FileStreamer, hashType *utils.HashType, params ...any) (model.File, string, error) {
+ h := hashType.NewFunc(params...)
+ tmpF, err := CacheFullInTempFileAndWriter(stream, h)
+ if err != nil {
+ return nil, "", err
+ }
+ return tmpF, hex.EncodeToString(h.Sum(nil)), err
+}
diff --git a/internal/task/base.go b/internal/task/base.go
new file mode 100644
index 00000000..c3703bd1
--- /dev/null
+++ b/internal/task/base.go
@@ -0,0 +1,90 @@
+package task
+
+import (
+ "context"
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/xhofe/tache"
+ "sync"
+ "time"
+)
+
+type TaskExtension struct {
+ tache.Base
+ ctx context.Context
+ ctxInitMutex sync.Mutex
+ Creator *model.User
+ startTime *time.Time
+ endTime *time.Time
+ totalBytes int64
+}
+
+func (t *TaskExtension) SetCreator(creator *model.User) {
+ t.Creator = creator
+ t.Persist()
+}
+
+func (t *TaskExtension) GetCreator() *model.User {
+ return t.Creator
+}
+
+func (t *TaskExtension) SetStartTime(startTime time.Time) {
+ t.startTime = &startTime
+}
+
+func (t *TaskExtension) GetStartTime() *time.Time {
+ return t.startTime
+}
+
+func (t *TaskExtension) SetEndTime(endTime time.Time) {
+ t.endTime = &endTime
+}
+
+func (t *TaskExtension) GetEndTime() *time.Time {
+ return t.endTime
+}
+
+func (t *TaskExtension) ClearEndTime() {
+ t.endTime = nil
+}
+
+func (t *TaskExtension) SetTotalBytes(totalBytes int64) {
+ t.totalBytes = totalBytes
+}
+
+func (t *TaskExtension) GetTotalBytes() int64 {
+ return t.totalBytes
+}
+
+func (t *TaskExtension) Ctx() context.Context {
+ if t.ctx == nil {
+ t.ctxInitMutex.Lock()
+ if t.ctx == nil {
+ t.ctx = context.WithValue(t.Base.Ctx(), "user", t.Creator)
+ }
+ t.ctxInitMutex.Unlock()
+ }
+ return t.ctx
+}
+
+func (t *TaskExtension) ReinitCtx() {
+ if !conf.Conf.Tasks.AllowRetryCanceled {
+ return
+ }
+ select {
+ case <-t.Base.Ctx().Done():
+ ctx, cancel := context.WithCancel(context.Background())
+ t.SetCtx(ctx)
+ t.SetCancelFunc(cancel)
+ t.ctx = nil
+ default:
+ }
+}
+
+type TaskExtensionInfo interface {
+ tache.TaskWithInfo
+ GetCreator() *model.User
+ GetStartTime() *time.Time
+ GetEndTime() *time.Time
+ GetTotalBytes() int64
+}
diff --git a/internal/task/manager.go b/internal/task/manager.go
new file mode 100644
index 00000000..3caa685a
--- /dev/null
+++ b/internal/task/manager.go
@@ -0,0 +1,20 @@
+package task
+
+import "github.com/xhofe/tache"
+
+type Manager[T tache.Task] interface {
+ Add(task T)
+ Cancel(id string)
+ CancelAll()
+ CancelByCondition(condition func(task T) bool)
+ GetAll() []T
+ GetByID(id string) (T, bool)
+ GetByState(state ...tache.State) []T
+ GetByCondition(condition func(task T) bool) []T
+ Remove(id string)
+ RemoveAll()
+ RemoveByState(state ...tache.State)
+ RemoveByCondition(condition func(task T) bool)
+ Retry(id string)
+ RetryAllFailed()
+}
diff --git a/pkg/utils/hash.go b/pkg/utils/hash.go
index fa06bcc2..a281dd4e 100644
--- a/pkg/utils/hash.go
+++ b/pkg/utils/hash.go
@@ -10,6 +10,7 @@ import (
"errors"
"hash"
"io"
+ "iter"
"github.com/alist-org/alist/v3/internal/errs"
log "github.com/sirupsen/logrus"
@@ -226,3 +227,13 @@ func (hi HashInfo) GetHash(ht *HashType) string {
func (hi HashInfo) Export() map[*HashType]string {
return hi.h
}
+
+func (hi HashInfo) All() iter.Seq2[*HashType, string] {
+ return func(yield func(*HashType, string) bool) {
+ for hashType, hashValue := range hi.h {
+ if !yield(hashType, hashValue) {
+ return
+ }
+ }
+ }
+}
diff --git a/pkg/utils/path.go b/pkg/utils/path.go
index c0793a3e..135f8e4e 100644
--- a/pkg/utils/path.go
+++ b/pkg/utils/path.go
@@ -45,7 +45,7 @@ func IsSubPath(path string, subPath string) bool {
func Ext(path string) string {
ext := stdpath.Ext(path)
- if strings.HasPrefix(ext, ".") {
+ if len(ext) > 0 && ext[0] == '.' {
ext = ext[1:]
}
return strings.ToLower(ext)
diff --git a/pkg/utils/random/random.go b/pkg/utils/random/random.go
index 65fbf14a..c3f3dd48 100644
--- a/pkg/utils/random/random.go
+++ b/pkg/utils/random/random.go
@@ -1,20 +1,27 @@
package random
import (
- "math/rand"
+ "crypto/rand"
+ "math/big"
+ mathRand "math/rand"
"time"
"github.com/google/uuid"
)
-var Rand *rand.Rand
+var Rand *mathRand.Rand
const letterBytes = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
func String(n int) string {
b := make([]byte, n)
+ letterLen := big.NewInt(int64(len(letterBytes)))
for i := range b {
- b[i] = letterBytes[Rand.Intn(len(letterBytes))]
+ idx, err := rand.Int(rand.Reader, letterLen)
+ if err != nil {
+ panic(err)
+ }
+ b[i] = letterBytes[idx.Int64()]
}
return string(b)
}
@@ -24,10 +31,10 @@ func Token() string {
}
func RangeInt64(left, right int64) int64 {
- return rand.Int63n(left+right) - left
+ return mathRand.Int63n(left+right) - left
}
func init() {
- s := rand.NewSource(time.Now().UnixNano())
- Rand = rand.New(s)
+ s := mathRand.NewSource(time.Now().UnixNano())
+ Rand = mathRand.New(s)
}
diff --git a/pkg/utils/time.go b/pkg/utils/time.go
index aa706928..36573b4e 100644
--- a/pkg/utils/time.go
+++ b/pkg/utils/time.go
@@ -34,31 +34,36 @@ func NewDebounce2(interval time.Duration, f func()) func() {
if timer == nil {
timer = time.AfterFunc(interval, f)
}
- (*time.Timer)(timer).Reset(interval)
+ timer.Reset(interval)
}
}
func NewThrottle(interval time.Duration) func(func()) {
var lastCall time.Time
-
+ var lock sync.Mutex
return func(fn func()) {
+ lock.Lock()
+ defer lock.Unlock()
+
now := time.Now()
- if now.Sub(lastCall) < interval {
- return
+ if now.Sub(lastCall) >= interval {
+ lastCall = now
+ go fn()
}
- time.AfterFunc(interval, fn)
- lastCall = now
}
}
func NewThrottle2(interval time.Duration, fn func()) func() {
var lastCall time.Time
+ var lock sync.Mutex
return func() {
+ lock.Lock()
+ defer lock.Unlock()
+
now := time.Now()
- if now.Sub(lastCall) < interval {
- return
+ if now.Sub(lastCall) >= interval {
+ lastCall = now
+ go fn()
}
- time.AfterFunc(interval, fn)
- lastCall = now
}
}
diff --git a/server/common/base.go b/server/common/base.go
index eb6ef2b8..11a28d25 100644
--- a/server/common/base.go
+++ b/server/common/base.go
@@ -12,16 +12,16 @@ import (
func GetApiUrl(r *http.Request) string {
api := conf.Conf.SiteURL
if strings.HasPrefix(api, "http") {
- return api
+ return strings.TrimSuffix(api, "/")
}
if r != nil {
protocol := "http"
if r.TLS != nil || r.Header.Get("X-Forwarded-Proto") == "https" {
protocol = "https"
}
- host := r.Host
- if r.Header.Get("X-Forwarded-Host") != "" {
- host = r.Header.Get("X-Forwarded-Host")
+ host := r.Header.Get("X-Forwarded-Host")
+ if host == "" {
+ host = r.Host
}
api = fmt.Sprintf("%s://%s", protocol, stdpath.Join(host, api))
}
diff --git a/server/common/common.go b/server/common/common.go
index 28d2da44..33ae704e 100644
--- a/server/common/common.go
+++ b/server/common/common.go
@@ -1,6 +1,8 @@
package common
import (
+ "context"
+ "net/http"
"strings"
"github.com/alist-org/alist/v3/cmd/flags"
@@ -66,17 +68,32 @@ func ErrorStrResp(c *gin.Context, str string, code int, l ...bool) {
}
func SuccessResp(c *gin.Context, data ...interface{}) {
- if len(data) == 0 {
- c.JSON(200, Resp[interface{}]{
- Code: 200,
- Message: "success",
- Data: nil,
- })
- return
+ SuccessWithMsgResp(c, "success", data...)
+}
+
+func SuccessWithMsgResp(c *gin.Context, msg string, data ...interface{}) {
+ var respData interface{}
+ if len(data) > 0 {
+ respData = data[0]
}
+
c.JSON(200, Resp[interface{}]{
Code: 200,
- Message: "success",
- Data: data[0],
+ Message: msg,
+ Data: respData,
})
}
+
+func Pluralize(count int, singular, plural string) string {
+ if count == 1 {
+ return singular
+ }
+ return plural
+}
+
+func GetHttpReq(ctx context.Context) *http.Request {
+ if c, ok := ctx.(*gin.Context); ok {
+ return c.Request
+ }
+ return nil
+}
diff --git a/server/common/proxy.go b/server/common/proxy.go
index 10923613..ca7f6325 100644
--- a/server/common/proxy.go
+++ b/server/common/proxy.go
@@ -6,6 +6,10 @@ import (
"io"
"net/http"
"net/url"
+ "os"
+ "strings"
+
+ "maps"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/net"
@@ -18,27 +22,36 @@ import (
func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
if link.MFile != nil {
defer link.MFile.Close()
- attachFileName(w, file)
+ attachHeader(w, file)
contentType := link.Header.Get("Content-Type")
if contentType != "" {
w.Header().Set("Content-Type", contentType)
}
- http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile)
+ mFile := link.MFile
+ if _, ok := mFile.(*os.File); !ok {
+ mFile = &stream.RateLimitFile{
+ File: mFile,
+ Limiter: stream.ServerDownloadLimit,
+ Ctx: r.Context(),
+ }
+ }
+ http.ServeContent(w, r, file.GetName(), file.ModTime(), mFile)
return nil
} else if link.RangeReadCloser != nil {
- attachFileName(w, file)
- net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser.RangeRead)
- defer func() {
- _ = link.RangeReadCloser.Close()
- }()
- return nil
+ attachHeader(w, file)
+ return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{
+ RangeReadCloserIF: link.RangeReadCloser,
+ Limiter: stream.ServerDownloadLimit,
+ })
} else if link.Concurrency != 0 || link.PartSize != 0 {
- attachFileName(w, file)
+ attachHeader(w, file)
size := file.GetSize()
- //var finalClosers model.Closers
- finalClosers := utils.EmptyClosers()
- header := net.ProcessHeader(r.Header, link.Header)
rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+ requestHeader := ctx.Value("request_header")
+ if requestHeader == nil {
+ requestHeader = http.Header{}
+ }
+ header := net.ProcessHeader(requestHeader.(http.Header), link.Header)
down := net.NewDownloader(func(d *net.Downloader) {
d.Concurrency = link.Concurrency
d.PartSize = link.PartSize
@@ -50,39 +63,52 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
HeaderRef: header,
}
rc, err := down.Download(ctx, req)
- finalClosers.Add(rc)
return rc, err
}
- net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), rangeReader)
- defer finalClosers.Close()
- return nil
+ return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{
+ RangeReadCloserIF: &model.RangeReadCloser{RangeReader: rangeReader},
+ Limiter: stream.ServerDownloadLimit,
+ })
} else {
//transparent proxy
header := net.ProcessHeader(r.Header, link.Header)
- res, err := net.RequestHttp(context.Background(), r.Method, header, link.URL)
+ res, err := net.RequestHttp(r.Context(), r.Method, header, link.URL)
if err != nil {
return err
}
defer res.Body.Close()
- for h, v := range res.Header {
- w.Header()[h] = v
- }
+ maps.Copy(w.Header(), res.Header)
w.WriteHeader(res.StatusCode)
if r.Method == http.MethodHead {
return nil
}
- _, err = io.Copy(w, res.Body)
- if err != nil {
- return err
- }
- return nil
+ _, err = utils.CopyWithBuffer(w, &stream.RateLimitReader{
+ Reader: res.Body,
+ Limiter: stream.ServerDownloadLimit,
+ Ctx: r.Context(),
+ })
+ return err
}
}
-func attachFileName(w http.ResponseWriter, file model.Obj) {
+func attachHeader(w http.ResponseWriter, file model.Obj) {
fileName := file.GetName()
w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, fileName, url.PathEscape(fileName)))
w.Header().Set("Content-Type", utils.GetMimeType(fileName))
+ w.Header().Set("Etag", GetEtag(file))
+}
+func GetEtag(file model.Obj) string {
+ hash := ""
+ for _, v := range file.GetHash().Export() {
+ if strings.Compare(v, hash) > 0 {
+ hash = v
+ }
+ }
+ if len(hash) > 0 {
+ return fmt.Sprintf(`"%s"`, hash)
+ }
+ // 参考nginx
+ return fmt.Sprintf(`"%x-%x"`, file.ModTime().Unix(), file.GetSize())
}
var NoProxyRange = &model.RangeReadCloser{}
@@ -102,3 +128,29 @@ func ProxyRange(link *model.Link, size int64) {
link.RangeReadCloser = nil
}
}
+
+type InterceptResponseWriter struct {
+ http.ResponseWriter
+ io.Writer
+}
+
+func (iw *InterceptResponseWriter) Write(p []byte) (int, error) {
+ return iw.Writer.Write(p)
+}
+
+type WrittenResponseWriter struct {
+ http.ResponseWriter
+ written bool
+}
+
+func (ww *WrittenResponseWriter) Write(p []byte) (int, error) {
+ n, err := ww.ResponseWriter.Write(p)
+ if !ww.written && n > 0 {
+ ww.written = true
+ }
+ return n, err
+}
+
+func (ww *WrittenResponseWriter) IsWritten() bool {
+ return ww.written
+}
diff --git a/server/debug.go b/server/debug.go
index 081ef8c3..a4242abd 100644
--- a/server/debug.go
+++ b/server/debug.go
@@ -5,6 +5,7 @@ import (
_ "net/http/pprof"
"runtime"
+ "github.com/alist-org/alist/v3/internal/sign"
"github.com/alist-org/alist/v3/server/common"
"github.com/alist-org/alist/v3/server/middlewares"
"github.com/gin-gonic/gin"
@@ -15,7 +16,7 @@ func _pprof(g *gin.RouterGroup) {
}
func debug(g *gin.RouterGroup) {
- g.GET("/path/*path", middlewares.Down, func(ctx *gin.Context) {
+ g.GET("/path/*path", middlewares.Down(sign.Verify), func(ctx *gin.Context) {
rawPath := ctx.MustGet("path").(string)
ctx.JSON(200, gin.H{
"path": rawPath,
diff --git a/server/ftp.go b/server/ftp.go
new file mode 100644
index 00000000..4d507b68
--- /dev/null
+++ b/server/ftp.go
@@ -0,0 +1,288 @@
+package server
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/setting"
+ "github.com/alist-org/alist/v3/pkg/utils"
+ "github.com/alist-org/alist/v3/server/ftp"
+ "math/rand"
+ "net"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
// FtpMainDriver implements ftpserver.MainDriver: it owns the server-wide
// settings, tracks live control connections, authenticates logins and
// supplies the optional TLS configuration.
type FtpMainDriver struct {
	settings     *ftpserver.Settings
	proxyHeader  *http.Header // headers attached when AList proxies transfers on behalf of the client
	clients      map[uint32]ftpserver.ClientContext // live connections keyed by client ID
	shutdownLock sync.RWMutex // read-held while a client registers, write-held during Stop
	isShutdown   bool         // set once by Stop; new connections are refused afterwards
	tlsConfig    *tls.Config  // nil when no certificate/key pair was configured
}
+
+func NewMainDriver() (*FtpMainDriver, error) {
+ header := &http.Header{}
+ header.Add("User-Agent", setting.GetStr(conf.FTPProxyUserAgent))
+ transferType := ftpserver.TransferTypeASCII
+ if conf.Conf.FTP.DefaultTransferBinary {
+ transferType = ftpserver.TransferTypeBinary
+ }
+ activeConnCheck := ftpserver.IPMatchDisabled
+ if conf.Conf.FTP.EnableActiveConnIPCheck {
+ activeConnCheck = ftpserver.IPMatchRequired
+ }
+ pasvConnCheck := ftpserver.IPMatchDisabled
+ if conf.Conf.FTP.EnablePasvConnIPCheck {
+ pasvConnCheck = ftpserver.IPMatchRequired
+ }
+ tlsRequired := ftpserver.ClearOrEncrypted
+ if setting.GetBool(conf.FTPImplicitTLS) {
+ tlsRequired = ftpserver.ImplicitEncryption
+ } else if setting.GetBool(conf.FTPMandatoryTLS) {
+ tlsRequired = ftpserver.MandatoryEncryption
+ }
+ tlsConf, err := getTlsConf(setting.GetStr(conf.FTPTLSPrivateKeyPath), setting.GetStr(conf.FTPTLSPublicCertPath))
+ if err != nil && tlsRequired != ftpserver.ClearOrEncrypted {
+ return nil, fmt.Errorf("FTP mandatory TLS has been enabled, but the certificate failed to load: %w", err)
+ }
+ return &FtpMainDriver{
+ settings: &ftpserver.Settings{
+ ListenAddr: conf.Conf.FTP.Listen,
+ PublicHost: lookupIP(setting.GetStr(conf.FTPPublicHost)),
+ PassiveTransferPortGetter: newPortMapper(setting.GetStr(conf.FTPPasvPortMap)),
+ FindPasvPortAttempts: conf.Conf.FTP.FindPasvPortAttempts,
+ ActiveTransferPortNon20: conf.Conf.FTP.ActiveTransferPortNon20,
+ IdleTimeout: conf.Conf.FTP.IdleTimeout,
+ ConnectionTimeout: conf.Conf.FTP.ConnectionTimeout,
+ DisableMLSD: false,
+ DisableMLST: false,
+ DisableMFMT: true,
+ Banner: setting.GetStr(conf.Announcement),
+ TLSRequired: tlsRequired,
+ DisableLISTArgs: false,
+ DisableSite: false,
+ DisableActiveMode: conf.Conf.FTP.DisableActiveMode,
+ EnableHASH: false,
+ DisableSTAT: false,
+ DisableSYST: false,
+ EnableCOMB: false,
+ DefaultTransferType: transferType,
+ ActiveConnectionsCheck: activeConnCheck,
+ PasvConnectionsCheck: pasvConnCheck,
+ SiteHandlers: map[string]ftpserver.SiteHandler{
+ "SIZE": ftp.HandleSIZE,
+ },
+ },
+ proxyHeader: header,
+ clients: make(map[uint32]ftpserver.ClientContext),
+ shutdownLock: sync.RWMutex{},
+ isShutdown: false,
+ tlsConfig: tlsConf,
+ }, nil
+}
+
+func (d *FtpMainDriver) GetSettings() (*ftpserver.Settings, error) {
+ return d.settings, nil
+}
+
// ClientConnected registers a new control connection and returns the welcome
// banner. Connections are refused once shutdown has begun (flag already set,
// or the write lock is held by Stop).
func (d *FtpMainDriver) ClientConnected(cc ftpserver.ClientContext) (string, error) {
	if d.isShutdown || !d.shutdownLock.TryRLock() {
		return "", errors.New("server has shutdown")
	}
	defer d.shutdownLock.RUnlock()
	// NOTE(review): d.clients is mutated here under only a *read* lock, so two
	// concurrent connects can write the map at the same time — confirm whether
	// a dedicated mutex around the map is needed.
	d.clients[cc.ID()] = cc
	return "AList FTP Endpoint", nil
}
+
// ClientDisconnected closes the client's connection (logging, not returning,
// any close error) and removes it from the registry.
func (d *FtpMainDriver) ClientDisconnected(cc ftpserver.ClientContext) {
	err := cc.Close()
	if err != nil {
		utils.Log.Errorf("failed to close client: %v", err)
	}
	// NOTE(review): this delete is unsynchronized while ClientConnected
	// inserts into the same map under only a read lock — possible concurrent
	// map access; confirm against the library's callback threading model.
	delete(d.clients, cc.ID())
}
+
// AuthUser authenticates an FTP login and returns a per-session filesystem
// adapter. The usernames "anonymous" and "guest" map to the guest account,
// with the supplied password reused as the folder meta password; any other
// user must match their stored static password hash. Disabled accounts and
// accounts without the FTP permission are rejected in both cases.
func (d *FtpMainDriver) AuthUser(cc ftpserver.ClientContext, user, pass string) (ftpserver.ClientDriver, error) {
	var userObj *model.User
	var err error
	if user == "anonymous" || user == "guest" {
		userObj, err = op.GetGuest()
		if err != nil {
			return nil, err
		}
	} else {
		userObj, err = op.GetUserByName(user)
		if err != nil {
			return nil, err
		}
		passHash := model.StaticHash(pass)
		if err = userObj.ValidatePwdStaticHash(passHash); err != nil {
			return nil, err
		}
	}
	if userObj.Disabled || !userObj.CanFTPAccess() {
		return nil, errors.New("user is not allowed to access via FTP")
	}

	// Session context consumed by the ftp package: identity, meta password,
	// client IP and the proxy headers configured on the driver.
	ctx := context.Background()
	ctx = context.WithValue(ctx, "user", userObj)
	if user == "anonymous" || user == "guest" {
		// guests present the FTP password as the protected-folder password
		ctx = context.WithValue(ctx, "meta_pass", pass)
	} else {
		ctx = context.WithValue(ctx, "meta_pass", "")
	}
	ctx = context.WithValue(ctx, "client_ip", cc.RemoteAddr().String())
	ctx = context.WithValue(ctx, "proxy_header", d.proxyHeader)
	return ftp.NewAferoAdapter(ctx), nil
}
+
+func (d *FtpMainDriver) GetTLSConfig() (*tls.Config, error) {
+ if d.tlsConfig == nil {
+ return nil, errors.New("TLS config not provided")
+ }
+ return d.tlsConfig, nil
+}
+
+func (d *FtpMainDriver) Stop() {
+ d.isShutdown = true
+ d.shutdownLock.Lock()
+ defer d.shutdownLock.Unlock()
+ for _, value := range d.clients {
+ _ = value.Close()
+ }
+}
+
+func lookupIP(host string) string {
+ if host == "" || net.ParseIP(host) != nil {
+ return host
+ }
+ ips, err := net.LookupIP(host)
+ if err != nil || len(ips) == 0 {
+ utils.Log.Fatalf("given FTP public host is invalid, and the default value will be used: %v", err)
+ return ""
+ }
+ for _, ip := range ips {
+ if ip.To4() != nil {
+ return ip.String()
+ }
+ }
+ v6 := ips[0].String()
+ utils.Log.Warnf("no IPv4 record looked up, %s will be used as public host, and it might do not work.", v6)
+ return v6
+}
+
+func newPortMapper(str string) ftpserver.PasvPortGetter {
+ if str == "" {
+ return nil
+ }
+ pasvPortMappers := strings.Split(strings.Replace(str, "\n", ",", -1), ",")
+ type group struct {
+ ExposedStart int
+ ListenedStart int
+ Length int
+ }
+ groups := make([]group, len(pasvPortMappers))
+ totalLength := 0
+ convertToPorts := func(str string) (int, int, error) {
+ start, end, multi := strings.Cut(str, "-")
+ if multi {
+ si, err := strconv.Atoi(start)
+ if err != nil {
+ return 0, 0, err
+ }
+ ei, err := strconv.Atoi(end)
+ if err != nil {
+ return 0, 0, err
+ }
+ if ei < si || ei < 1024 || si < 1024 || ei > 65535 || si > 65535 {
+ return 0, 0, errors.New("invalid port")
+ }
+ return si, ei - si + 1, nil
+ } else {
+ ret, err := strconv.Atoi(str)
+ if err != nil {
+ return 0, 0, err
+ } else {
+ return ret, 1, nil
+ }
+ }
+ }
+ for i, mapper := range pasvPortMappers {
+ var err error
+ exposed, listened, mapped := strings.Cut(mapper, ":")
+ for {
+ if mapped {
+ var es, ls, el, ll int
+ es, el, err = convertToPorts(exposed)
+ if err != nil {
+ break
+ }
+ ls, ll, err = convertToPorts(listened)
+ if err != nil {
+ break
+ }
+ if el != ll {
+ err = errors.New("the number of exposed ports and listened ports does not match")
+ break
+ }
+ groups[i].ExposedStart = es
+ groups[i].ListenedStart = ls
+ groups[i].Length = el
+ totalLength += el
+ } else {
+ var start, length int
+ start, length, err = convertToPorts(mapper)
+ groups[i].ExposedStart = start
+ groups[i].ListenedStart = start
+ groups[i].Length = length
+ totalLength += length
+ }
+ break
+ }
+ if err != nil {
+ utils.Log.Fatalf("failed to convert FTP PASV port mapper %s: %v, the port mapper will be ignored.", mapper, err)
+ return nil
+ }
+ }
+ return func() (int, int, bool) {
+ idxPort := rand.Intn(totalLength)
+ for _, g := range groups {
+ if idxPort >= g.Length {
+ idxPort -= g.Length
+ } else {
+ return g.ExposedStart + idxPort, g.ListenedStart + idxPort, true
+ }
+ }
+ // unreachable
+ return 0, 0, false
+ }
+}
+
// getTlsConf builds a TLS configuration from PEM key/certificate files on
// disk. Both paths must be configured; otherwise the returned error simply
// signals "FTPS not configured". Errors are wrapped so the log shows which
// of the two files (or the parse step) failed.
func getTlsConf(keyPath, certPath string) (*tls.Config, error) {
	if keyPath == "" || certPath == "" {
		return nil, errors.New("private key or certificate is not provided")
	}
	cert, err := os.ReadFile(certPath)
	if err != nil {
		return nil, fmt.Errorf("read FTP TLS certificate %s: %w", certPath, err)
	}
	key, err := os.ReadFile(keyPath)
	if err != nil {
		return nil, fmt.Errorf("read FTP TLS private key %s: %w", keyPath, err)
	}
	tlsCert, err := tls.X509KeyPair(cert, key)
	if err != nil {
		return nil, fmt.Errorf("parse FTP TLS key pair: %w", err)
	}
	return &tls.Config{Certificates: []tls.Certificate{tlsCert}}, nil
}
diff --git a/server/ftp/afero.go b/server/ftp/afero.go
new file mode 100644
index 00000000..75ae2e43
--- /dev/null
+++ b/server/ftp/afero.go
@@ -0,0 +1,121 @@
+package ftp
+
+import (
+ "context"
+ "errors"
+ ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/fs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/spf13/afero"
+ "os"
+ "time"
+)
+
+type AferoAdapter struct {
+ ctx context.Context
+ nextFileSize int64
+}
+
+func NewAferoAdapter(ctx context.Context) *AferoAdapter {
+ return &AferoAdapter{ctx: ctx}
+}
+
+func (a *AferoAdapter) Create(_ string) (afero.File, error) {
+ // See also GetHandle
+ return nil, errs.NotImplement
+}
+
+func (a *AferoAdapter) Mkdir(name string, _ os.FileMode) error {
+ return Mkdir(a.ctx, name)
+}
+
+func (a *AferoAdapter) MkdirAll(path string, perm os.FileMode) error {
+ return a.Mkdir(path, perm)
+}
+
+func (a *AferoAdapter) Open(_ string) (afero.File, error) {
+ // See also GetHandle and ReadDir
+ return nil, errs.NotImplement
+}
+
+func (a *AferoAdapter) OpenFile(_ string, _ int, _ os.FileMode) (afero.File, error) {
+ // See also GetHandle
+ return nil, errs.NotImplement
+}
+
+func (a *AferoAdapter) Remove(name string) error {
+ return Remove(a.ctx, name)
+}
+
+func (a *AferoAdapter) RemoveAll(path string) error {
+ return a.Remove(path)
+}
+
+func (a *AferoAdapter) Rename(oldName, newName string) error {
+ return Rename(a.ctx, oldName, newName)
+}
+
+func (a *AferoAdapter) Stat(name string) (os.FileInfo, error) {
+ return Stat(a.ctx, name)
+}
+
+func (a *AferoAdapter) Name() string {
+ return "AList FTP Endpoint"
+}
+
+func (a *AferoAdapter) Chmod(_ string, _ os.FileMode) error {
+ return errs.NotSupport
+}
+
+func (a *AferoAdapter) Chown(_ string, _, _ int) error {
+ return errs.NotSupport
+}
+
+func (a *AferoAdapter) Chtimes(_ string, _ time.Time, _ time.Time) error {
+ return errs.NotSupport
+}
+
+func (a *AferoAdapter) ReadDir(name string) ([]os.FileInfo, error) {
+ return List(a.ctx, name)
+}
+
// GetHandle opens a transfer for RETR (download) or STOR (upload). The
// one-shot size hint recorded by a preceding SITE SIZE command
// (nextFileSize) selects the streaming upload path; it is consumed up front
// so it never leaks into a later transfer, even on error.
func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserver.FileTransfer, error) {
	fileSize := a.nextFileSize
	a.nextFileSize = 0
	// synchronous and append writes are not supported by the fs layer
	if (flags & os.O_SYNC) != 0 {
		return nil, errs.NotSupport
	}
	if (flags & os.O_APPEND) != 0 {
		return nil, errs.NotSupport
	}
	user := a.ctx.Value("user").(*model.User)
	path, err := user.JoinPath(name)
	if err != nil {
		return nil, err
	}
	// existence check drives the O_CREATE / O_EXCL semantics below
	_, err = fs.Get(a.ctx, path, &fs.GetArgs{})
	exists := err == nil
	if (flags&os.O_CREATE) == 0 && !exists {
		return nil, errs.ObjectNotFound
	}
	if (flags&os.O_EXCL) != 0 && exists {
		return nil, errors.New("file already exists")
	}
	if (flags & os.O_WRONLY) != 0 {
		// uploads must start at byte 0 (REST before STOR is not supported)
		if offset != 0 {
			return nil, errs.NotSupport
		}
		trunc := (flags & os.O_TRUNC) != 0
		if fileSize > 0 {
			// known length: stream straight through to the storage driver
			return OpenUploadWithLength(a.ctx, path, trunc, fileSize)
		} else {
			// unknown length: spool to a temp file first
			return OpenUpload(a.ctx, path, trunc)
		}
	}
	return OpenDownload(a.ctx, path, offset)
}
+
+func (a *AferoAdapter) SetNextFileSize(size int64) {
+ a.nextFileSize = size
+}
diff --git a/server/ftp/fsmanage.go b/server/ftp/fsmanage.go
new file mode 100644
index 00000000..fb03c1b9
--- /dev/null
+++ b/server/ftp/fsmanage.go
@@ -0,0 +1,82 @@
+package ftp
+
+import (
+ "context"
+ "fmt"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/fs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/server/common"
+ "github.com/pkg/errors"
+ stdpath "path"
+)
+
+func Mkdir(ctx context.Context, path string) error {
+ user := ctx.Value("user").(*model.User)
+ reqPath, err := user.JoinPath(path)
+ if err != nil {
+ return err
+ }
+ if !user.CanWrite() || !user.CanFTPManage() {
+ meta, err := op.GetNearestMeta(stdpath.Dir(reqPath))
+ if err != nil {
+ if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
+ return err
+ }
+ }
+ if !common.CanWrite(meta, reqPath) {
+ return errs.PermissionDenied
+ }
+ }
+ return fs.MakeDir(ctx, reqPath)
+}
+
+func Remove(ctx context.Context, path string) error {
+ user := ctx.Value("user").(*model.User)
+ if !user.CanRemove() || !user.CanFTPManage() {
+ return errs.PermissionDenied
+ }
+ reqPath, err := user.JoinPath(path)
+ if err != nil {
+ return err
+ }
+ return fs.Remove(ctx, reqPath)
+}
+
+func Rename(ctx context.Context, oldPath, newPath string) error {
+ user := ctx.Value("user").(*model.User)
+ srcPath, err := user.JoinPath(oldPath)
+ if err != nil {
+ return err
+ }
+ dstPath, err := user.JoinPath(newPath)
+ if err != nil {
+ return err
+ }
+ srcDir, srcBase := stdpath.Split(srcPath)
+ dstDir, dstBase := stdpath.Split(dstPath)
+ if srcDir == dstDir {
+ if !user.CanRename() || !user.CanFTPManage() {
+ return errs.PermissionDenied
+ }
+ return fs.Rename(ctx, srcPath, dstBase)
+ } else {
+ if !user.CanFTPManage() || !user.CanMove() || (srcBase != dstBase && !user.CanRename()) {
+ return errs.PermissionDenied
+ }
+ if err = fs.Move(ctx, srcPath, dstDir); err != nil {
+ if srcBase != dstBase {
+ return err
+ }
+ if _, err1 := fs.Copy(ctx, srcPath, dstDir); err1 != nil {
+ return fmt.Errorf("failed move for %+v, and failed try copying for %+v", err, err1)
+ }
+ return nil
+ }
+ if srcBase != dstBase {
+ return fs.Rename(ctx, stdpath.Join(dstDir, srcBase), dstBase)
+ }
+ return nil
+ }
+}
diff --git a/server/ftp/fsread.go b/server/ftp/fsread.go
new file mode 100644
index 00000000..c051a19d
--- /dev/null
+++ b/server/ftp/fsread.go
@@ -0,0 +1,163 @@
+package ftp
+
+import (
+ "context"
+ ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/fs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/alist-org/alist/v3/server/common"
+ "github.com/pkg/errors"
+ fs2 "io/fs"
+ "net/http"
+ "os"
+ "time"
+)
+
+type FileDownloadProxy struct {
+ ftpserver.FileTransfer
+ reader stream.SStreamReadAtSeeker
+}
+
+func OpenDownload(ctx context.Context, reqPath string, offset int64) (*FileDownloadProxy, error) {
+ user := ctx.Value("user").(*model.User)
+ meta, err := op.GetNearestMeta(reqPath)
+ if err != nil {
+ if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
+ return nil, err
+ }
+ }
+ ctx = context.WithValue(ctx, "meta", meta)
+ if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) {
+ return nil, errs.PermissionDenied
+ }
+
+ // directly use proxy
+ header := *(ctx.Value("proxy_header").(*http.Header))
+ link, obj, err := fs.Link(ctx, reqPath, model.LinkArgs{
+ IP: ctx.Value("client_ip").(string),
+ Header: header,
+ })
+ if err != nil {
+ return nil, err
+ }
+ fileStream := stream.FileStream{
+ Obj: obj,
+ Ctx: ctx,
+ }
+ ss, err := stream.NewSeekableStream(fileStream, link)
+ if err != nil {
+ return nil, err
+ }
+ reader, err := stream.NewReadAtSeeker(ss, offset)
+ if err != nil {
+ _ = ss.Close()
+ return nil, err
+ }
+ return &FileDownloadProxy{reader: reader}, nil
+}
+
+func (f *FileDownloadProxy) Read(p []byte) (n int, err error) {
+ n, err = f.reader.Read(p)
+ if err != nil {
+ return
+ }
+ err = stream.ClientDownloadLimit.WaitN(f.reader.GetRawStream().Ctx, n)
+ return
+}
+
+func (f *FileDownloadProxy) Write(p []byte) (n int, err error) {
+ return 0, errs.NotSupport
+}
+
+func (f *FileDownloadProxy) Seek(offset int64, whence int) (int64, error) {
+ return f.reader.Seek(offset, whence)
+}
+
+func (f *FileDownloadProxy) Close() error {
+ return f.reader.Close()
+}
+
+type OsFileInfoAdapter struct {
+ obj model.Obj
+}
+
+func (o *OsFileInfoAdapter) Name() string {
+ return o.obj.GetName()
+}
+
+func (o *OsFileInfoAdapter) Size() int64 {
+ return o.obj.GetSize()
+}
+
+func (o *OsFileInfoAdapter) Mode() fs2.FileMode {
+ var mode fs2.FileMode = 0755
+ if o.IsDir() {
+ mode |= fs2.ModeDir
+ }
+ return mode
+}
+
+func (o *OsFileInfoAdapter) ModTime() time.Time {
+ return o.obj.ModTime()
+}
+
+func (o *OsFileInfoAdapter) IsDir() bool {
+ return o.obj.IsDir()
+}
+
+func (o *OsFileInfoAdapter) Sys() any {
+ return o.obj
+}
+
+func Stat(ctx context.Context, path string) (os.FileInfo, error) {
+ user := ctx.Value("user").(*model.User)
+ reqPath, err := user.JoinPath(path)
+ if err != nil {
+ return nil, err
+ }
+ meta, err := op.GetNearestMeta(reqPath)
+ if err != nil {
+ if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
+ return nil, err
+ }
+ }
+ ctx = context.WithValue(ctx, "meta", meta)
+ if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) {
+ return nil, errs.PermissionDenied
+ }
+ obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{})
+ if err != nil {
+ return nil, err
+ }
+ return &OsFileInfoAdapter{obj: obj}, nil
+}
+
+func List(ctx context.Context, path string) ([]os.FileInfo, error) {
+ user := ctx.Value("user").(*model.User)
+ reqPath, err := user.JoinPath(path)
+ if err != nil {
+ return nil, err
+ }
+ meta, err := op.GetNearestMeta(reqPath)
+ if err != nil {
+ if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
+ return nil, err
+ }
+ }
+ ctx = context.WithValue(ctx, "meta", meta)
+ if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) {
+ return nil, errs.PermissionDenied
+ }
+ objs, err := fs.List(ctx, reqPath, &fs.ListArgs{})
+ if err != nil {
+ return nil, err
+ }
+ ret := make([]os.FileInfo, len(objs))
+ for i, obj := range objs {
+ ret[i] = &OsFileInfoAdapter{obj: obj}
+ }
+ return ret, nil
+}
diff --git a/server/ftp/fsup.go b/server/ftp/fsup.go
new file mode 100644
index 00000000..ee38b1bf
--- /dev/null
+++ b/server/ftp/fsup.go
@@ -0,0 +1,218 @@
+package ftp
+
+import (
+ "bytes"
+ "context"
+ ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/fs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/alist-org/alist/v3/server/common"
+ "github.com/pkg/errors"
+ "io"
+ "net/http"
+ "os"
+ stdpath "path"
+ "time"
+)
+
+type FileUploadProxy struct {
+ ftpserver.FileTransfer
+ buffer *os.File
+ path string
+ ctx context.Context
+ trunc bool
+}
+
+func uploadAuth(ctx context.Context, path string) error {
+ user := ctx.Value("user").(*model.User)
+ meta, err := op.GetNearestMeta(stdpath.Dir(path))
+ if err != nil {
+ if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
+ return err
+ }
+ }
+ if !(common.CanAccess(user, meta, path, ctx.Value("meta_pass").(string)) &&
+ ((user.CanFTPManage() && user.CanWrite()) || common.CanWrite(meta, stdpath.Dir(path)))) {
+ return errs.PermissionDenied
+ }
+ return nil
+}
+
+func OpenUpload(ctx context.Context, path string, trunc bool) (*FileUploadProxy, error) {
+ err := uploadAuth(ctx, path)
+ if err != nil {
+ return nil, err
+ }
+ tmpFile, err := os.CreateTemp(conf.Conf.TempDir, "file-*")
+ if err != nil {
+ return nil, err
+ }
+ return &FileUploadProxy{buffer: tmpFile, path: path, ctx: ctx, trunc: trunc}, nil
+}
+
+func (f *FileUploadProxy) Read(p []byte) (n int, err error) {
+ return 0, errs.NotSupport
+}
+
+func (f *FileUploadProxy) Write(p []byte) (n int, err error) {
+ n, err = f.buffer.Write(p)
+ if err != nil {
+ return
+ }
+ err = stream.ClientUploadLimit.WaitN(f.ctx, n)
+ return
+}
+
+func (f *FileUploadProxy) Seek(offset int64, whence int) (int64, error) {
+ return f.buffer.Seek(offset, whence)
+}
+
+func (f *FileUploadProxy) Close() error {
+ dir, name := stdpath.Split(f.path)
+ size, err := f.buffer.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ if _, err := f.buffer.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ arr := make([]byte, 512)
+ if _, err := f.buffer.Read(arr); err != nil {
+ return err
+ }
+ contentType := http.DetectContentType(arr)
+ if _, err := f.buffer.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ if f.trunc {
+ _ = fs.Remove(f.ctx, f.path)
+ }
+ s := &stream.FileStream{
+ Obj: &model.Object{
+ Name: name,
+ Size: size,
+ Modified: time.Now(),
+ },
+ Mimetype: contentType,
+ WebPutAsTask: true,
+ }
+ s.SetTmpFile(f.buffer)
+ _, err = fs.PutAsTask(f.ctx, dir, s)
+ return err
+}
+
+type FileUploadWithLengthProxy struct {
+ ftpserver.FileTransfer
+ ctx context.Context
+ path string
+ length int64
+ first512Bytes [512]byte
+ pFirst int
+ pipeWriter io.WriteCloser
+ errChan chan error
+}
+
+func OpenUploadWithLength(ctx context.Context, path string, trunc bool, length int64) (*FileUploadWithLengthProxy, error) {
+ err := uploadAuth(ctx, path)
+ if err != nil {
+ return nil, err
+ }
+ if trunc {
+ _ = fs.Remove(ctx, path)
+ }
+ return &FileUploadWithLengthProxy{ctx: ctx, path: path, length: length}, nil
+}
+
+func (f *FileUploadWithLengthProxy) Read(p []byte) (n int, err error) {
+ return 0, errs.NotSupport
+}
+
// write buffers the first 512 bytes to sniff the MIME type; once the buffer
// fills, it starts a background fs.PutDirectly fed through an io.Pipe (its
// result is delivered on errChan) and streams every subsequent chunk into
// the pipe.
func (f *FileUploadWithLengthProxy) write(p []byte) (n int, err error) {
	if f.pipeWriter != nil {
		// streaming phase: fail fast if the uploader goroutine already errored
		select {
		case e := <-f.errChan:
			return 0, e
		default:
			return f.pipeWriter.Write(p)
		}
	} else if len(p) < 512-f.pFirst {
		// still filling the sniff buffer
		copy(f.first512Bytes[f.pFirst:], p)
		f.pFirst += len(p)
		return len(p), nil
	} else {
		// the sniff buffer completes with this chunk: detect the type, start
		// the background upload, then flush buffer + remainder into the pipe
		copy(f.first512Bytes[f.pFirst:], p[:512-f.pFirst])
		contentType := http.DetectContentType(f.first512Bytes[:])
		dir, name := stdpath.Split(f.path)
		reader, writer := io.Pipe()
		f.errChan = make(chan error, 1)
		s := &stream.FileStream{
			Obj: &model.Object{
				Name:     name,
				Size:     f.length,
				Modified: time.Now(),
			},
			Mimetype:     contentType,
			WebPutAsTask: false,
			Reader:       reader,
		}
		go func() {
			e := fs.PutDirectly(f.ctx, dir, s, true)
			f.errChan <- e
			close(f.errChan)
		}()
		f.pipeWriter = writer
		n, err = writer.Write(f.first512Bytes[:])
		if err != nil {
			return n, err
		}
		n1, err := writer.Write(p[512-f.pFirst:])
		if err != nil {
			// report only how many bytes of p were consumed
			return n1 + 512 - f.pFirst, err
		}
		f.pFirst = 512
		return len(p), nil
	}
}
+
+func (f *FileUploadWithLengthProxy) Write(p []byte) (n int, err error) {
+ n, err = f.write(p)
+ if err != nil {
+ return
+ }
+ err = stream.ClientUploadLimit.WaitN(f.ctx, n)
+ return
+}
+
+func (f *FileUploadWithLengthProxy) Seek(offset int64, whence int) (int64, error) {
+ return 0, errs.NotSupport
+}
+
+func (f *FileUploadWithLengthProxy) Close() error {
+ if f.pipeWriter != nil {
+ err := f.pipeWriter.Close()
+ if err != nil {
+ return err
+ }
+ err = <-f.errChan
+ return err
+ } else {
+ data := f.first512Bytes[:f.pFirst]
+ contentType := http.DetectContentType(data)
+ dir, name := stdpath.Split(f.path)
+ s := &stream.FileStream{
+ Obj: &model.Object{
+ Name: name,
+ Size: int64(f.pFirst),
+ Modified: time.Now(),
+ },
+ Mimetype: contentType,
+ WebPutAsTask: false,
+ Reader: bytes.NewReader(data),
+ }
+ return fs.PutDirectly(f.ctx, dir, s, true)
+ }
+}
diff --git a/server/ftp/site.go b/server/ftp/site.go
new file mode 100644
index 00000000..8ea667d8
--- /dev/null
+++ b/server/ftp/site.go
@@ -0,0 +1,21 @@
+package ftp
+
+import (
+ "fmt"
+ ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
+ "strconv"
+)
+
+func HandleSIZE(param string, client ftpserver.ClientDriver) (int, string) {
+ fs, ok := client.(*AferoAdapter)
+ if !ok {
+ return ftpserver.StatusNotLoggedIn, "Unexpected exception (driver is nil)"
+ }
+ size, err := strconv.ParseInt(param, 10, 64)
+ if err != nil {
+ return ftpserver.StatusSyntaxErrorParameters, fmt.Sprintf(
+ "Couldn't parse file size, given: %s, err: %v", param, err)
+ }
+ fs.SetNextFileSize(size)
+ return ftpserver.StatusOK, "Accepted next file size"
+}
diff --git a/server/handles/archive.go b/server/handles/archive.go
new file mode 100644
index 00000000..550bc3ce
--- /dev/null
+++ b/server/handles/archive.go
@@ -0,0 +1,409 @@
+package handles
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/alist-org/alist/v3/internal/task"
+ "net/url"
+ stdpath "path"
+
+ "github.com/alist-org/alist/v3/internal/archive/tool"
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/fs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/setting"
+ "github.com/alist-org/alist/v3/internal/sign"
+ "github.com/alist-org/alist/v3/pkg/utils"
+ "github.com/alist-org/alist/v3/server/common"
+ "github.com/gin-gonic/gin"
+ "github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
+)
+
+type ArchiveMetaReq struct {
+ Path string `json:"path" form:"path"`
+ Password string `json:"password" form:"password"`
+ Refresh bool `json:"refresh" form:"refresh"`
+ ArchivePass string `json:"archive_pass" form:"archive_pass"`
+}
+
+type ArchiveMetaResp struct {
+ Comment string `json:"comment"`
+ IsEncrypted bool `json:"encrypted"`
+ Content []ArchiveContentResp `json:"content"`
+ Sort *model.Sort `json:"sort,omitempty"`
+ RawURL string `json:"raw_url"`
+ Sign string `json:"sign"`
+}
+
+type ArchiveContentResp struct {
+ ObjResp
+ Children []ArchiveContentResp `json:"children"`
+}
+
+func toObjsRespWithoutSignAndThumb(obj model.Obj) ObjResp {
+ return ObjResp{
+ Name: obj.GetName(),
+ Size: obj.GetSize(),
+ IsDir: obj.IsDir(),
+ Modified: obj.ModTime(),
+ Created: obj.CreateTime(),
+ HashInfoStr: obj.GetHash().String(),
+ HashInfo: obj.GetHash().Export(),
+ Sign: "",
+ Thumb: "",
+ Type: utils.GetObjType(obj.GetName(), obj.IsDir()),
+ }
+}
+
+func toContentResp(objs []model.ObjTree) []ArchiveContentResp {
+ if objs == nil {
+ return nil
+ }
+ ret, _ := utils.SliceConvert(objs, func(src model.ObjTree) (ArchiveContentResp, error) {
+ return ArchiveContentResp{
+ ObjResp: toObjsRespWithoutSignAndThumb(src),
+ Children: toContentResp(src.GetChildren()),
+ }, nil
+ })
+ return ret
+}
+
+func FsArchiveMeta(c *gin.Context) {
+ var req ArchiveMetaReq
+ if err := c.ShouldBind(&req); err != nil {
+ common.ErrorResp(c, err, 400)
+ return
+ }
+ user := c.MustGet("user").(*model.User)
+ if !user.CanReadArchives() {
+ common.ErrorResp(c, errs.PermissionDenied, 403)
+ return
+ }
+ reqPath, err := user.JoinPath(req.Path)
+ if err != nil {
+ common.ErrorResp(c, err, 403)
+ return
+ }
+ meta, err := op.GetNearestMeta(reqPath)
+ if err != nil {
+ if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
+ common.ErrorResp(c, err, 500, true)
+ return
+ }
+ }
+ c.Set("meta", meta)
+ if !common.CanAccess(user, meta, reqPath, req.Password) {
+ common.ErrorStrResp(c, "password is incorrect or you have no permission", 403)
+ return
+ }
+ archiveArgs := model.ArchiveArgs{
+ LinkArgs: model.LinkArgs{
+ Header: c.Request.Header,
+ Type: c.Query("type"),
+ HttpReq: c.Request,
+ },
+ Password: req.ArchivePass,
+ }
+ ret, err := fs.ArchiveMeta(c, reqPath, model.ArchiveMetaArgs{
+ ArchiveArgs: archiveArgs,
+ Refresh: req.Refresh,
+ })
+ if err != nil {
+ if errors.Is(err, errs.WrongArchivePassword) {
+ common.ErrorResp(c, err, 202)
+ } else {
+ common.ErrorResp(c, err, 500)
+ }
+ return
+ }
+ s := ""
+ if isEncrypt(meta, reqPath) || setting.GetBool(conf.SignAll) {
+ s = sign.SignArchive(reqPath)
+ }
+ api := "/ae"
+ if ret.DriverProviding {
+ api = "/ad"
+ }
+ common.SuccessResp(c, ArchiveMetaResp{
+ Comment: ret.GetComment(),
+ IsEncrypted: ret.IsEncrypted(),
+ Content: toContentResp(ret.GetTree()),
+ Sort: ret.Sort,
+ RawURL: fmt.Sprintf("%s%s%s", common.GetApiUrl(c.Request), api, utils.EncodePath(reqPath, true)),
+ Sign: s,
+ })
+}
+
+type ArchiveListReq struct {
+ ArchiveMetaReq
+ model.PageReq
+ InnerPath string `json:"inner_path" form:"inner_path"`
+}
+
+type ArchiveListResp struct {
+ Content []ObjResp `json:"content"`
+ Total int64 `json:"total"`
+}
+
+func FsArchiveList(c *gin.Context) {
+ var req ArchiveListReq
+ if err := c.ShouldBind(&req); err != nil {
+ common.ErrorResp(c, err, 400)
+ return
+ }
+ req.Validate()
+ user := c.MustGet("user").(*model.User)
+ if !user.CanReadArchives() {
+ common.ErrorResp(c, errs.PermissionDenied, 403)
+ return
+ }
+ reqPath, err := user.JoinPath(req.Path)
+ if err != nil {
+ common.ErrorResp(c, err, 403)
+ return
+ }
+ meta, err := op.GetNearestMeta(reqPath)
+ if err != nil {
+ if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
+ common.ErrorResp(c, err, 500, true)
+ return
+ }
+ }
+ c.Set("meta", meta)
+ if !common.CanAccess(user, meta, reqPath, req.Password) {
+ common.ErrorStrResp(c, "password is incorrect or you have no permission", 403)
+ return
+ }
+ objs, err := fs.ArchiveList(c, reqPath, model.ArchiveListArgs{
+ ArchiveInnerArgs: model.ArchiveInnerArgs{
+ ArchiveArgs: model.ArchiveArgs{
+ LinkArgs: model.LinkArgs{
+ Header: c.Request.Header,
+ Type: c.Query("type"),
+ HttpReq: c.Request,
+ },
+ Password: req.ArchivePass,
+ },
+ InnerPath: utils.FixAndCleanPath(req.InnerPath),
+ },
+ Refresh: req.Refresh,
+ })
+ if err != nil {
+ if errors.Is(err, errs.WrongArchivePassword) {
+ common.ErrorResp(c, err, 202)
+ } else {
+ common.ErrorResp(c, err, 500)
+ }
+ return
+ }
+ total, objs := pagination(objs, &req.PageReq)
+ ret, _ := utils.SliceConvert(objs, func(src model.Obj) (ObjResp, error) {
+ return toObjsRespWithoutSignAndThumb(src), nil
+ })
+ common.SuccessResp(c, ArchiveListResp{
+ Content: ret,
+ Total: int64(total),
+ })
+}
+
+type StringOrArray []string
+
+func (s *StringOrArray) UnmarshalJSON(data []byte) error {
+ var value string
+ if err := json.Unmarshal(data, &value); err == nil {
+ *s = []string{value}
+ return nil
+ }
+ var sliceValue []string
+ if err := json.Unmarshal(data, &sliceValue); err != nil {
+ return err
+ }
+ *s = sliceValue
+ return nil
+}
+
+type ArchiveDecompressReq struct {
+ SrcDir string `json:"src_dir" form:"src_dir"`
+ DstDir string `json:"dst_dir" form:"dst_dir"`
+ Name StringOrArray `json:"name" form:"name"`
+ ArchivePass string `json:"archive_pass" form:"archive_pass"`
+ InnerPath string `json:"inner_path" form:"inner_path"`
+ CacheFull bool `json:"cache_full" form:"cache_full"`
+ PutIntoNewDir bool `json:"put_into_new_dir" form:"put_into_new_dir"`
+}
+
+func FsArchiveDecompress(c *gin.Context) {
+ var req ArchiveDecompressReq
+ if err := c.ShouldBind(&req); err != nil {
+ common.ErrorResp(c, err, 400)
+ return
+ }
+ user := c.MustGet("user").(*model.User)
+ if !user.CanDecompress() {
+ common.ErrorResp(c, errs.PermissionDenied, 403)
+ return
+ }
+ srcPaths := make([]string, 0, len(req.Name))
+ for _, name := range req.Name {
+ srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, name))
+ if err != nil {
+ common.ErrorResp(c, err, 403)
+ return
+ }
+ srcPaths = append(srcPaths, srcPath)
+ }
+ dstDir, err := user.JoinPath(req.DstDir)
+ if err != nil {
+ common.ErrorResp(c, err, 403)
+ return
+ }
+ tasks := make([]task.TaskExtensionInfo, 0, len(srcPaths))
+ for _, srcPath := range srcPaths {
+ t, e := fs.ArchiveDecompress(c, srcPath, dstDir, model.ArchiveDecompressArgs{
+ ArchiveInnerArgs: model.ArchiveInnerArgs{
+ ArchiveArgs: model.ArchiveArgs{
+ LinkArgs: model.LinkArgs{
+ Header: c.Request.Header,
+ Type: c.Query("type"),
+ HttpReq: c.Request,
+ },
+ Password: req.ArchivePass,
+ },
+ InnerPath: utils.FixAndCleanPath(req.InnerPath),
+ },
+ CacheFull: req.CacheFull,
+ PutIntoNewDir: req.PutIntoNewDir,
+ })
+ if e != nil {
+ if errors.Is(e, errs.WrongArchivePassword) {
+ common.ErrorResp(c, e, 202)
+ } else {
+ common.ErrorResp(c, e, 500)
+ }
+ return
+ }
+ if t != nil {
+ tasks = append(tasks, t)
+ }
+ }
+ common.SuccessResp(c, gin.H{
+ "task": getTaskInfos(tasks),
+ })
+}
+
+func ArchiveDown(c *gin.Context) {
+ archiveRawPath := c.MustGet("path").(string)
+ innerPath := utils.FixAndCleanPath(c.Query("inner"))
+ password := c.Query("pass")
+ filename := stdpath.Base(innerPath)
+ storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{})
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ if common.ShouldProxy(storage, filename) {
+ ArchiveProxy(c)
+ return
+ } else {
+ link, _, err := fs.ArchiveDriverExtract(c, archiveRawPath, model.ArchiveInnerArgs{
+ ArchiveArgs: model.ArchiveArgs{
+ LinkArgs: model.LinkArgs{
+ IP: c.ClientIP(),
+ Header: c.Request.Header,
+ Type: c.Query("type"),
+ HttpReq: c.Request,
+ Redirect: true,
+ },
+ Password: password,
+ },
+ InnerPath: innerPath,
+ })
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ down(c, link)
+ }
+}
+
+func ArchiveProxy(c *gin.Context) {
+ archiveRawPath := c.MustGet("path").(string)
+ innerPath := utils.FixAndCleanPath(c.Query("inner"))
+ password := c.Query("pass")
+ filename := stdpath.Base(innerPath)
+ storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{})
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ if canProxy(storage, filename) {
+ // TODO: Support external download proxy URL
+ link, file, err := fs.ArchiveDriverExtract(c, archiveRawPath, model.ArchiveInnerArgs{
+ ArchiveArgs: model.ArchiveArgs{
+ LinkArgs: model.LinkArgs{
+ Header: c.Request.Header,
+ Type: c.Query("type"),
+ HttpReq: c.Request,
+ },
+ Password: password,
+ },
+ InnerPath: innerPath,
+ })
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ localProxy(c, link, file, storage.GetStorage().ProxyRange)
+ } else {
+ common.ErrorStrResp(c, "proxy not allowed", 403)
+ return
+ }
+}
+
+func ArchiveInternalExtract(c *gin.Context) {
+ archiveRawPath := c.MustGet("path").(string)
+ innerPath := utils.FixAndCleanPath(c.Query("inner"))
+ password := c.Query("pass")
+ rc, size, err := fs.ArchiveInternalExtract(c, archiveRawPath, model.ArchiveInnerArgs{
+ ArchiveArgs: model.ArchiveArgs{
+ LinkArgs: model.LinkArgs{
+ Header: c.Request.Header,
+ Type: c.Query("type"),
+ HttpReq: c.Request,
+ },
+ Password: password,
+ },
+ InnerPath: innerPath,
+ })
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ defer func() {
+ if err := rc.Close(); err != nil {
+ log.Errorf("failed to close file streamer, %v", err)
+ }
+ }()
+ headers := map[string]string{
+ "Referrer-Policy": "no-referrer",
+ "Cache-Control": "max-age=0, no-cache, no-store, must-revalidate",
+ }
+ filename := stdpath.Base(innerPath)
+ headers["Content-Disposition"] = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, filename, url.PathEscape(filename))
+ contentType := c.Request.Header.Get("Content-Type")
+ if contentType == "" {
+ contentType = utils.GetMimeType(filename)
+ }
+ c.DataFromReader(200, size, contentType, rc, headers)
+}
+
+func ArchiveExtensions(c *gin.Context) {
+ var ext []string
+ for key := range tool.Tools {
+ ext = append(ext, key)
+ }
+ common.SuccessResp(c, ext)
+}
diff --git a/server/handles/auth.go b/server/handles/auth.go
index e1f512c4..7a2c0fb5 100644
--- a/server/handles/auth.go
+++ b/server/handles/auth.go
@@ -113,6 +113,10 @@ func UpdateCurrent(c *gin.Context) {
return
}
user := c.MustGet("user").(*model.User)
+ if user.IsGuest() {
+ common.ErrorStrResp(c, "Guest user can not update profile", 403)
+ return
+ }
user.Username = req.Username
if req.Password != "" {
user.SetPassword(req.Password)
diff --git a/server/handles/const.go b/server/handles/const.go
new file mode 100644
index 00000000..b108c9da
--- /dev/null
+++ b/server/handles/const.go
@@ -0,0 +1,7 @@
+package handles
+
+const (
+ CANCEL = "cancel"
+ OVERWRITE = "overwrite"
+ SKIP = "skip"
+)
diff --git a/server/handles/down.go b/server/handles/down.go
index 0020ed14..2c5c2faf 100644
--- a/server/handles/down.go
+++ b/server/handles/down.go
@@ -1,9 +1,11 @@
package handles
import (
+ "bytes"
"fmt"
"io"
stdpath "path"
+ "strconv"
"strings"
"github.com/alist-org/alist/v3/internal/conf"
@@ -15,7 +17,9 @@ import (
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
"github.com/gin-gonic/gin"
+ "github.com/microcosm-cc/bluemonday"
log "github.com/sirupsen/logrus"
+ "github.com/yuin/goldmark"
)
func Down(c *gin.Context) {
@@ -31,37 +35,17 @@ func Down(c *gin.Context) {
return
} else {
link, _, err := fs.Link(c, rawPath, model.LinkArgs{
- IP: c.ClientIP(),
- Header: c.Request.Header,
- Type: c.Query("type"),
- HttpReq: c.Request,
+ IP: c.ClientIP(),
+ Header: c.Request.Header,
+ Type: c.Query("type"),
+ HttpReq: c.Request,
+ Redirect: true,
})
if err != nil {
common.ErrorResp(c, err, 500)
return
}
- if link.MFile != nil {
- defer func(ReadSeekCloser io.ReadCloser) {
- err := ReadSeekCloser.Close()
- if err != nil {
- log.Errorf("close data error: %s", err)
- }
- }(link.MFile)
- }
- c.Header("Referrer-Policy", "no-referrer")
- c.Header("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate")
- if setting.GetBool(conf.ForwardDirectLinkParams) {
- query := c.Request.URL.Query()
- for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] {
- query.Del(v)
- }
- link.URL, err = utils.InjectQuery(link.URL, query)
- if err != nil {
- common.ErrorResp(c, err, 500)
- return
- }
- }
- c.Redirect(302, link.URL)
+ down(c, link)
}
}
@@ -95,31 +79,94 @@ func Proxy(c *gin.Context) {
common.ErrorResp(c, err, 500)
return
}
- if link.URL != "" && setting.GetBool(conf.ForwardDirectLinkParams) {
- query := c.Request.URL.Query()
- for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] {
- query.Del(v)
- }
- link.URL, err = utils.InjectQuery(link.URL, query)
- if err != nil {
- common.ErrorResp(c, err, 500)
- return
- }
- }
- if storage.GetStorage().ProxyRange {
- common.ProxyRange(link, file.GetSize())
- }
- err = common.Proxy(c.Writer, c.Request, link, file)
- if err != nil {
- common.ErrorResp(c, err, 500, true)
- return
- }
+ localProxy(c, link, file, storage.GetStorage().ProxyRange)
} else {
common.ErrorStrResp(c, "proxy not allowed", 403)
return
}
}
+func down(c *gin.Context, link *model.Link) {
+ var err error
+ if link.MFile != nil {
+ defer func(ReadSeekCloser io.ReadCloser) {
+ err := ReadSeekCloser.Close()
+ if err != nil {
+ log.Errorf("close data error: %s", err)
+ }
+ }(link.MFile)
+ }
+ c.Header("Referrer-Policy", "no-referrer")
+ c.Header("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate")
+ if setting.GetBool(conf.ForwardDirectLinkParams) {
+ query := c.Request.URL.Query()
+ for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] {
+ query.Del(v)
+ }
+ link.URL, err = utils.InjectQuery(link.URL, query)
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ }
+ c.Redirect(302, link.URL)
+}
+
+func localProxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
+ var err error
+ if link.URL != "" && setting.GetBool(conf.ForwardDirectLinkParams) {
+ query := c.Request.URL.Query()
+ for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] {
+ query.Del(v)
+ }
+ link.URL, err = utils.InjectQuery(link.URL, query)
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ }
+ if proxyRange {
+ common.ProxyRange(link, file.GetSize())
+ }
+ Writer := &common.WrittenResponseWriter{ResponseWriter: c.Writer}
+
+	// Handle markdown (.md) files first: render to sanitized HTML before serving
+ if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) {
+ buf := bytes.NewBuffer(make([]byte, 0, file.GetSize()))
+ w := &common.InterceptResponseWriter{ResponseWriter: Writer, Writer: buf}
+ err = common.Proxy(w, c.Request, link, file)
+ if err == nil && buf.Len() > 0 {
+			if c.Writer.Status() < 200 || c.Writer.Status() >= 300 {
+ c.Writer.Write(buf.Bytes())
+ return
+ }
+
+ var html bytes.Buffer
+ if err = goldmark.Convert(buf.Bytes(), &html); err != nil {
+ err = fmt.Errorf("markdown conversion failed: %w", err)
+ } else {
+ buf.Reset()
+ err = bluemonday.UGCPolicy().SanitizeReaderToWriter(&html, buf)
+ if err == nil {
+ Writer.Header().Set("Content-Length", strconv.FormatInt(int64(buf.Len()), 10))
+ Writer.Header().Set("Content-Type", "text/html; charset=utf-8")
+ _, err = utils.CopyWithBuffer(Writer, buf)
+ }
+ }
+ }
+ } else {
+ err = common.Proxy(Writer, c.Request, link, file)
+ }
+ if err == nil {
+ return
+ }
+ if Writer.IsWritten() {
+ log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err)
+ } else {
+ common.ErrorResp(c, err, 500, true)
+ }
+}
+
// TODO need optimize
// when can be proxy?
// 1. text file
diff --git a/server/handles/fsbatch.go b/server/handles/fsbatch.go
index fa7971df..3841bff5 100644
--- a/server/handles/fsbatch.go
+++ b/server/handles/fsbatch.go
@@ -3,6 +3,7 @@ package handles
import (
"fmt"
"regexp"
+ "slices"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/fs"
@@ -14,6 +15,125 @@ import (
"github.com/pkg/errors"
)
+type RecursiveMoveReq struct {
+ SrcDir string `json:"src_dir"`
+ DstDir string `json:"dst_dir"`
+ ConflictPolicy string `json:"conflict_policy"`
+}
+
+func FsRecursiveMove(c *gin.Context) {
+ var req RecursiveMoveReq
+ if err := c.ShouldBind(&req); err != nil {
+ common.ErrorResp(c, err, 400)
+ return
+ }
+
+ user := c.MustGet("user").(*model.User)
+ if !user.CanMove() {
+ common.ErrorResp(c, errs.PermissionDenied, 403)
+ return
+ }
+ srcDir, err := user.JoinPath(req.SrcDir)
+ if err != nil {
+ common.ErrorResp(c, err, 403)
+ return
+ }
+ dstDir, err := user.JoinPath(req.DstDir)
+ if err != nil {
+ common.ErrorResp(c, err, 403)
+ return
+ }
+
+ meta, err := op.GetNearestMeta(srcDir)
+ if err != nil {
+ if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
+ common.ErrorResp(c, err, 500, true)
+ return
+ }
+ }
+ c.Set("meta", meta)
+
+ rootFiles, err := fs.List(c, srcDir, &fs.ListArgs{})
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+
+ var existingFileNames []string
+ if req.ConflictPolicy != OVERWRITE {
+ dstFiles, err := fs.List(c, dstDir, &fs.ListArgs{})
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ existingFileNames = make([]string, 0, len(dstFiles))
+ for _, dstFile := range dstFiles {
+ existingFileNames = append(existingFileNames, dstFile.GetName())
+ }
+ }
+
+ // record the file path
+ filePathMap := make(map[model.Obj]string)
+ movingFiles := generic.NewQueue[model.Obj]()
+ movingFileNames := make([]string, 0, len(rootFiles))
+ for _, file := range rootFiles {
+ movingFiles.Push(file)
+ filePathMap[file] = srcDir
+ }
+
+ for !movingFiles.IsEmpty() {
+
+ movingFile := movingFiles.Pop()
+ movingFilePath := filePathMap[movingFile]
+ movingFileName := fmt.Sprintf("%s/%s", movingFilePath, movingFile.GetName())
+ if movingFile.IsDir() {
+ // directory, recursive move
+ subFilePath := movingFileName
+ subFiles, err := fs.List(c, movingFileName, &fs.ListArgs{Refresh: true})
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ for _, subFile := range subFiles {
+ movingFiles.Push(subFile)
+ filePathMap[subFile] = subFilePath
+ }
+ } else {
+ if movingFilePath == dstDir {
+ // same directory, don't move
+ continue
+ }
+
+ if slices.Contains(existingFileNames, movingFile.GetName()) {
+ if req.ConflictPolicy == CANCEL {
+ common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", movingFile.GetName()), 403)
+ return
+ } else if req.ConflictPolicy == SKIP {
+ continue
+ }
+ } else if req.ConflictPolicy != OVERWRITE {
+ existingFileNames = append(existingFileNames, movingFile.GetName())
+ }
+ movingFileNames = append(movingFileNames, movingFileName)
+
+ }
+
+ }
+
+ var count = 0
+ for i, fileName := range movingFileNames {
+ // move
+ err := fs.Move(c, fileName, dstDir, len(movingFileNames) > i+1)
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ count++
+ }
+
+ common.SuccessWithMsgResp(c, fmt.Sprintf("Successfully moved %d %s", count, common.Pluralize(count, "file", "files")))
+}
+
type BatchRenameReq struct {
SrcDir string `json:"src_dir"`
RenameObjects []struct {
@@ -61,94 +181,6 @@ func FsBatchRename(c *gin.Context) {
common.SuccessResp(c)
}
-type RecursiveMoveReq struct {
- SrcDir string `json:"src_dir"`
- DstDir string `json:"dst_dir"`
-}
-
-func FsRecursiveMove(c *gin.Context) {
- var req RecursiveMoveReq
- if err := c.ShouldBind(&req); err != nil {
- common.ErrorResp(c, err, 400)
- return
- }
-
- user := c.MustGet("user").(*model.User)
- if !user.CanMove() {
- common.ErrorResp(c, errs.PermissionDenied, 403)
- return
- }
- srcDir, err := user.JoinPath(req.SrcDir)
- if err != nil {
- common.ErrorResp(c, err, 403)
- return
- }
- dstDir, err := user.JoinPath(req.DstDir)
- if err != nil {
- common.ErrorResp(c, err, 403)
- return
- }
-
- meta, err := op.GetNearestMeta(srcDir)
- if err != nil {
- if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
- common.ErrorResp(c, err, 500, true)
- return
- }
- }
- c.Set("meta", meta)
-
- rootFiles, err := fs.List(c, srcDir, &fs.ListArgs{})
- if err != nil {
- common.ErrorResp(c, err, 500)
- return
- }
-
- // record the file path
- filePathMap := make(map[model.Obj]string)
- movingFiles := generic.NewQueue[model.Obj]()
- for _, file := range rootFiles {
- movingFiles.Push(file)
- filePathMap[file] = srcDir
- }
-
- for !movingFiles.IsEmpty() {
-
- movingFile := movingFiles.Pop()
- movingFilePath := filePathMap[movingFile]
- movingFileName := fmt.Sprintf("%s/%s", movingFilePath, movingFile.GetName())
- if movingFile.IsDir() {
- // directory, recursive move
- subFilePath := movingFileName
- subFiles, err := fs.List(c, movingFileName, &fs.ListArgs{Refresh: true})
- if err != nil {
- common.ErrorResp(c, err, 500)
- return
- }
- for _, subFile := range subFiles {
- movingFiles.Push(subFile)
- filePathMap[subFile] = subFilePath
- }
- } else {
-
- if movingFilePath == dstDir {
- // same directory, don't move
- continue
- }
-
- // move
- err := fs.Move(c, movingFileName, dstDir, movingFiles.IsEmpty())
- if err != nil {
- common.ErrorResp(c, err, 500)
- return
- }
- }
-
- }
-
- common.SuccessResp(c)
-}
-
type RegexRenameReq struct {
SrcDir string `json:"src_dir"`
SrcNameRegex string `json:"src_name_regex"`
diff --git a/server/handles/fsmanage.go b/server/handles/fsmanage.go
index 3d446eda..c527464e 100644
--- a/server/handles/fsmanage.go
+++ b/server/handles/fsmanage.go
@@ -2,7 +2,7 @@ package handles
import (
"fmt"
- "github.com/xhofe/tache"
+ "github.com/alist-org/alist/v3/internal/task"
"io"
stdpath "path"
@@ -56,9 +56,10 @@ func FsMkdir(c *gin.Context) {
}
type MoveCopyReq struct {
- SrcDir string `json:"src_dir"`
- DstDir string `json:"dst_dir"`
- Names []string `json:"names"`
+ SrcDir string `json:"src_dir"`
+ DstDir string `json:"dst_dir"`
+ Names []string `json:"names"`
+ Overwrite bool `json:"overwrite"`
}
func FsMove(c *gin.Context) {
@@ -86,6 +87,14 @@ func FsMove(c *gin.Context) {
common.ErrorResp(c, err, 403)
return
}
+ if !req.Overwrite {
+ for _, name := range req.Names {
+ if res, _ := fs.Get(c, stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil {
+ common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403)
+ return
+ }
+ }
+ }
for i, name := range req.Names {
err := fs.Move(c, stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1)
if err != nil {
@@ -121,7 +130,15 @@ func FsCopy(c *gin.Context) {
common.ErrorResp(c, err, 403)
return
}
- var addedTasks []tache.TaskWithInfo
+ if !req.Overwrite {
+ for _, name := range req.Names {
+ if res, _ := fs.Get(c, stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil {
+ common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403)
+ return
+ }
+ }
+ }
+ var addedTasks []task.TaskExtensionInfo
for i, name := range req.Names {
t, err := fs.Copy(c, stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1)
if t != nil {
@@ -138,8 +155,9 @@ func FsCopy(c *gin.Context) {
}
type RenameReq struct {
- Path string `json:"path"`
- Name string `json:"name"`
+ Path string `json:"path"`
+ Name string `json:"name"`
+ Overwrite bool `json:"overwrite"`
}
func FsRename(c *gin.Context) {
@@ -158,6 +176,15 @@ func FsRename(c *gin.Context) {
common.ErrorResp(c, err, 403)
return
}
+ if !req.Overwrite {
+ dstPath := stdpath.Join(stdpath.Dir(reqPath), req.Name)
+ if dstPath != reqPath {
+ if res, _ := fs.Get(c, dstPath, &fs.GetArgs{NoLog: true}); res != nil {
+ common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", req.Name), 403)
+ return
+ }
+ }
+ }
if err := fs.Rename(c, reqPath, req.Name); err != nil {
common.ErrorResp(c, err, 500)
return
diff --git a/server/handles/fsread.go b/server/handles/fsread.go
index 7c580f63..73bde23b 100644
--- a/server/handles/fsread.go
+++ b/server/handles/fsread.go
@@ -33,6 +33,8 @@ type DirReq struct {
}
type ObjResp struct {
+ Id string `json:"id"`
+ Path string `json:"path"`
Name string `json:"name"`
Size int64 `json:"size"`
IsDir bool `json:"is_dir"`
@@ -210,6 +212,8 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp {
for _, obj := range objs {
thumb, _ := model.GetThumb(obj)
resp = append(resp, ObjResp{
+ Id: obj.GetID(),
+ Path: obj.GetPath(),
Name: obj.GetName(),
Size: obj.GetSize(),
IsDir: obj.IsDir(),
@@ -303,9 +307,10 @@ func FsGet(c *gin.Context) {
} else {
// if storage is not proxy, use raw url by fs.Link
link, _, err := fs.Link(c, reqPath, model.LinkArgs{
- IP: c.ClientIP(),
- Header: c.Request.Header,
- HttpReq: c.Request,
+ IP: c.ClientIP(),
+ Header: c.Request.Header,
+ HttpReq: c.Request,
+ Redirect: true,
})
if err != nil {
common.ErrorResp(c, err, 500)
@@ -325,6 +330,8 @@ func FsGet(c *gin.Context) {
thumb, _ := model.GetThumb(obj)
common.SuccessResp(c, FsGetResp{
ObjResp: ObjResp{
+ Id: obj.GetID(),
+ Path: obj.GetPath(),
Name: obj.GetName(),
Size: obj.GetSize(),
IsDir: obj.IsDir(),
diff --git a/server/handles/fsup.go b/server/handles/fsup.go
index ef9baa11..41344fb8 100644
--- a/server/handles/fsup.go
+++ b/server/handles/fsup.go
@@ -1,17 +1,17 @@
package handles
import (
- "github.com/xhofe/tache"
"io"
"net/url"
stdpath "path"
"strconv"
"time"
- "github.com/alist-org/alist/v3/internal/stream"
-
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/alist-org/alist/v3/internal/task"
+ "github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
"github.com/gin-gonic/gin"
)
@@ -35,12 +35,20 @@ func FsStream(c *gin.Context) {
return
}
asTask := c.GetHeader("As-Task") == "true"
+ overwrite := c.GetHeader("Overwrite") != "false"
user := c.MustGet("user").(*model.User)
path, err = user.JoinPath(path)
if err != nil {
common.ErrorResp(c, err, 403)
return
}
+ if !overwrite {
+ if res, _ := fs.Get(c, path, &fs.GetArgs{NoLog: true}); res != nil {
+ _, _ = utils.CopyWithBuffer(io.Discard, c.Request.Body)
+ common.ErrorStrResp(c, "file exists", 403)
+ return
+ }
+ }
dir, name := stdpath.Split(path)
sizeStr := c.GetHeader("Content-Length")
size, err := strconv.ParseInt(sizeStr, 10, 64)
@@ -48,19 +56,34 @@ func FsStream(c *gin.Context) {
common.ErrorResp(c, err, 400)
return
}
+ h := make(map[*utils.HashType]string)
+ if md5 := c.GetHeader("X-File-Md5"); md5 != "" {
+ h[utils.MD5] = md5
+ }
+ if sha1 := c.GetHeader("X-File-Sha1"); sha1 != "" {
+ h[utils.SHA1] = sha1
+ }
+ if sha256 := c.GetHeader("X-File-Sha256"); sha256 != "" {
+ h[utils.SHA256] = sha256
+ }
+ mimetype := c.GetHeader("Content-Type")
+ if len(mimetype) == 0 {
+ mimetype = utils.GetMimeType(name)
+ }
s := &stream.FileStream{
Obj: &model.Object{
Name: name,
Size: size,
Modified: getLastModified(c),
+ HashInfo: utils.NewHashInfoByMap(h),
},
Reader: c.Request.Body,
- Mimetype: c.GetHeader("Content-Type"),
+ Mimetype: mimetype,
WebPutAsTask: asTask,
}
- var t tache.TaskWithInfo
+ var t task.TaskExtensionInfo
if asTask {
- t, err = fs.PutAsTask(dir, s)
+ t, err = fs.PutAsTask(c, dir, s)
} else {
err = fs.PutDirectly(c, dir, s, true)
}
@@ -70,6 +93,9 @@ func FsStream(c *gin.Context) {
return
}
if t == nil {
+ if n, _ := io.ReadFull(c.Request.Body, []byte{0}); n == 1 {
+ _, _ = utils.CopyWithBuffer(io.Discard, c.Request.Body)
+ }
common.SuccessResp(c)
return
}
@@ -86,12 +112,20 @@ func FsForm(c *gin.Context) {
return
}
asTask := c.GetHeader("As-Task") == "true"
+ overwrite := c.GetHeader("Overwrite") != "false"
user := c.MustGet("user").(*model.User)
path, err = user.JoinPath(path)
if err != nil {
common.ErrorResp(c, err, 403)
return
}
+ if !overwrite {
+ if res, _ := fs.Get(c, path, &fs.GetArgs{NoLog: true}); res != nil {
+ _, _ = utils.CopyWithBuffer(io.Discard, c.Request.Body)
+ common.ErrorStrResp(c, "file exists", 403)
+ return
+ }
+ }
storage, err := fs.GetStorage(path, &fs.GetStoragesArgs{})
if err != nil {
common.ErrorResp(c, err, 400)
@@ -113,29 +147,39 @@ func FsForm(c *gin.Context) {
}
defer f.Close()
dir, name := stdpath.Split(path)
+ h := make(map[*utils.HashType]string)
+ if md5 := c.GetHeader("X-File-Md5"); md5 != "" {
+ h[utils.MD5] = md5
+ }
+ if sha1 := c.GetHeader("X-File-Sha1"); sha1 != "" {
+ h[utils.SHA1] = sha1
+ }
+ if sha256 := c.GetHeader("X-File-Sha256"); sha256 != "" {
+ h[utils.SHA256] = sha256
+ }
+ mimetype := file.Header.Get("Content-Type")
+ if len(mimetype) == 0 {
+ mimetype = utils.GetMimeType(name)
+ }
s := stream.FileStream{
Obj: &model.Object{
Name: name,
Size: file.Size,
Modified: getLastModified(c),
+ HashInfo: utils.NewHashInfoByMap(h),
},
Reader: f,
- Mimetype: file.Header.Get("Content-Type"),
+ Mimetype: mimetype,
WebPutAsTask: asTask,
}
- var t tache.TaskWithInfo
+ var t task.TaskExtensionInfo
if asTask {
s.Reader = struct {
io.Reader
}{f}
- t, err = fs.PutAsTask(dir, &s)
+ t, err = fs.PutAsTask(c, dir, &s)
} else {
- ss, err := stream.NewSeekableStream(s, nil)
- if err != nil {
- common.ErrorResp(c, err, 500)
- return
- }
- err = fs.PutDirectly(c, dir, ss, true)
+ err = fs.PutDirectly(c, dir, &s, true)
}
if err != nil {
common.ErrorResp(c, err, 500)
diff --git a/server/handles/offline_download.go b/server/handles/offline_download.go
index 0b019e9e..24ff7a05 100644
--- a/server/handles/offline_download.go
+++ b/server/handles/offline_download.go
@@ -1,13 +1,16 @@
package handles
import (
+ _115 "github.com/alist-org/alist/v3/drivers/115"
+ "github.com/alist-org/alist/v3/drivers/pikpak"
+ "github.com/alist-org/alist/v3/drivers/thunder"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/offline_download/tool"
"github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/task"
"github.com/alist-org/alist/v3/server/common"
"github.com/gin-gonic/gin"
- "github.com/xhofe/tache"
)
type SetAria2Req struct {
@@ -30,6 +33,10 @@ func SetAria2(c *gin.Context) {
return
}
_tool, err := tool.Tools.Get("aria2")
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
version, err := _tool.Init()
if err != nil {
common.ErrorResp(c, err, 500)
@@ -69,6 +76,169 @@ func SetQbittorrent(c *gin.Context) {
common.SuccessResp(c, "ok")
}
+type SetTransmissionReq struct {
+ Uri string `json:"uri" form:"uri"`
+ Seedtime string `json:"seedtime" form:"seedtime"`
+}
+
+func SetTransmission(c *gin.Context) {
+ var req SetTransmissionReq
+ if err := c.ShouldBind(&req); err != nil {
+ common.ErrorResp(c, err, 400)
+ return
+ }
+ items := []model.SettingItem{
+ {Key: conf.TransmissionUri, Value: req.Uri, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+ {Key: conf.TransmissionSeedtime, Value: req.Seedtime, Type: conf.TypeNumber, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+ }
+ if err := op.SaveSettingItems(items); err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ _tool, err := tool.Tools.Get("Transmission")
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ if _, err := _tool.Init(); err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ common.SuccessResp(c, "ok")
+}
+
+type Set115Req struct {
+ TempDir string `json:"temp_dir" form:"temp_dir"`
+}
+
+func Set115(c *gin.Context) {
+ var req Set115Req
+ if err := c.ShouldBind(&req); err != nil {
+ common.ErrorResp(c, err, 400)
+ return
+ }
+ if req.TempDir != "" {
+ storage, _, err := op.GetStorageAndActualPath(req.TempDir)
+ if err != nil {
+			common.ErrorStrResp(c, "storage does not exist", 400)
+ return
+ }
+ if storage.Config().CheckStatus && storage.GetStorage().Status != op.WORK {
+ common.ErrorStrResp(c, "storage not init: "+storage.GetStorage().Status, 400)
+ return
+ }
+ if _, ok := storage.(*_115.Pan115); !ok {
+ common.ErrorStrResp(c, "unsupported storage driver for offline download, only 115 Cloud is supported", 400)
+ return
+ }
+ }
+ items := []model.SettingItem{
+ {Key: conf.Pan115TempDir, Value: req.TempDir, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+ }
+ if err := op.SaveSettingItems(items); err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ _tool, err := tool.Tools.Get("115 Cloud")
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ if _, err := _tool.Init(); err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ common.SuccessResp(c, "ok")
+}
+
+type SetPikPakReq struct {
+ TempDir string `json:"temp_dir" form:"temp_dir"`
+}
+
+func SetPikPak(c *gin.Context) {
+ var req SetPikPakReq
+ if err := c.ShouldBind(&req); err != nil {
+ common.ErrorResp(c, err, 400)
+ return
+ }
+ if req.TempDir != "" {
+ storage, _, err := op.GetStorageAndActualPath(req.TempDir)
+ if err != nil {
+			common.ErrorStrResp(c, "storage does not exist", 400)
+ return
+ }
+ if storage.Config().CheckStatus && storage.GetStorage().Status != op.WORK {
+ common.ErrorStrResp(c, "storage not init: "+storage.GetStorage().Status, 400)
+ return
+ }
+ if _, ok := storage.(*pikpak.PikPak); !ok {
+ common.ErrorStrResp(c, "unsupported storage driver for offline download, only PikPak is supported", 400)
+ return
+ }
+ }
+ items := []model.SettingItem{
+ {Key: conf.PikPakTempDir, Value: req.TempDir, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+ }
+ if err := op.SaveSettingItems(items); err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ _tool, err := tool.Tools.Get("PikPak")
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ if _, err := _tool.Init(); err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ common.SuccessResp(c, "ok")
+}
+
+type SetThunderReq struct {
+ TempDir string `json:"temp_dir" form:"temp_dir"`
+}
+
+func SetThunder(c *gin.Context) {
+ var req SetThunderReq
+ if err := c.ShouldBind(&req); err != nil {
+ common.ErrorResp(c, err, 400)
+ return
+ }
+ if req.TempDir != "" {
+ storage, _, err := op.GetStorageAndActualPath(req.TempDir)
+ if err != nil {
+			common.ErrorStrResp(c, "storage does not exist", 400)
+ return
+ }
+ if storage.Config().CheckStatus && storage.GetStorage().Status != op.WORK {
+ common.ErrorStrResp(c, "storage not init: "+storage.GetStorage().Status, 400)
+ return
+ }
+ if _, ok := storage.(*thunder.Thunder); !ok {
+ common.ErrorStrResp(c, "unsupported storage driver for offline download, only Thunder is supported", 400)
+ return
+ }
+ }
+ items := []model.SettingItem{
+ {Key: conf.ThunderTempDir, Value: req.TempDir, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+ }
+ if err := op.SaveSettingItems(items); err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ _tool, err := tool.Tools.Get("Thunder")
+ if err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ if _, err := _tool.Init(); err != nil {
+ common.ErrorResp(c, err, 500)
+ return
+ }
+ common.SuccessResp(c, "ok")
+}
+
func OfflineDownloadTools(c *gin.Context) {
tools := tool.Tools.Names()
common.SuccessResp(c, tools)
@@ -98,7 +268,7 @@ func AddOfflineDownload(c *gin.Context) {
common.ErrorResp(c, err, 403)
return
}
- var tasks []tache.TaskWithInfo
+ var tasks []task.TaskExtensionInfo
for _, url := range req.Urls {
t, err := tool.AddURL(c, &tool.AddURLArgs{
URL: url,
@@ -110,7 +280,9 @@ func AddOfflineDownload(c *gin.Context) {
common.ErrorResp(c, err, 500)
return
}
- tasks = append(tasks, t)
+ if t != nil {
+ tasks = append(tasks, t)
+ }
}
common.SuccessResp(c, gin.H{
"tasks": getTaskInfos(tasks),
diff --git a/server/handles/sshkey.go b/server/handles/sshkey.go
new file mode 100644
index 00000000..6f8d46b4
--- /dev/null
+++ b/server/handles/sshkey.go
@@ -0,0 +1,125 @@
+package handles
+
+import (
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/server/common"
+ "github.com/gin-gonic/gin"
+ "strconv"
+ "strings"
+)
+
+type SSHKeyAddReq struct {
+ Title string `json:"title" binding:"required"`
+ Key string `json:"key" binding:"required"`
+}
+
+func AddMyPublicKey(c *gin.Context) {
+ userObj, ok := c.Value("user").(*model.User)
+ if !ok || userObj.IsGuest() {
+ common.ErrorStrResp(c, "user invalid", 401)
+ return
+ }
+ var req SSHKeyAddReq
+ if err := c.ShouldBind(&req); err != nil {
+ common.ErrorStrResp(c, "request invalid", 400)
+ return
+ }
+ if req.Title == "" {
+ common.ErrorStrResp(c, "request invalid", 400)
+ return
+ }
+ key := &model.SSHPublicKey{
+ Title: req.Title,
+ KeyStr: strings.TrimSpace(req.Key),
+ UserId: userObj.ID,
+ }
+ err, parsed := op.CreateSSHPublicKey(key)
+ if !parsed {
+ common.ErrorStrResp(c, "provided key invalid", 400)
+ return
+ } else if err != nil {
+ common.ErrorResp(c, err, 500, true)
+ return
+ }
+ common.SuccessResp(c)
+}
+
+func ListMyPublicKey(c *gin.Context) {
+ userObj, ok := c.Value("user").(*model.User)
+ if !ok || userObj.IsGuest() {
+ common.ErrorStrResp(c, "user invalid", 401)
+ return
+ }
+ list(c, userObj)
+}
+
+func DeleteMyPublicKey(c *gin.Context) {
+ userObj, ok := c.Value("user").(*model.User)
+ if !ok || userObj.IsGuest() {
+ common.ErrorStrResp(c, "user invalid", 401)
+ return
+ }
+ keyId, err := strconv.Atoi(c.Query("id"))
+ if err != nil {
+ common.ErrorStrResp(c, "id format invalid", 400)
+ return
+ }
+ key, err := op.GetSSHPublicKeyByIdAndUserId(uint(keyId), userObj.ID)
+ if err != nil {
+ common.ErrorStrResp(c, "failed to get public key", 404)
+ return
+ }
+ err = op.DeleteSSHPublicKeyById(key.ID)
+ if err != nil {
+ common.ErrorResp(c, err, 500, true)
+ return
+ }
+ common.SuccessResp(c)
+}
+
+func ListPublicKeys(c *gin.Context) {
+ userId, err := strconv.Atoi(c.Query("uid"))
+ if err != nil {
+ common.ErrorStrResp(c, "user id format invalid", 400)
+ return
+ }
+ userObj, err := op.GetUserById(uint(userId))
+ if err != nil {
+ common.ErrorStrResp(c, "user invalid", 404)
+ return
+ }
+ list(c, userObj)
+}
+
+func DeletePublicKey(c *gin.Context) {
+ keyId, err := strconv.Atoi(c.Query("id"))
+ if err != nil {
+ common.ErrorStrResp(c, "id format invalid", 400)
+ return
+ }
+ err = op.DeleteSSHPublicKeyById(uint(keyId))
+ if err != nil {
+ common.ErrorResp(c, err, 500, true)
+ return
+ }
+ common.SuccessResp(c)
+}
+
+func list(c *gin.Context, userObj *model.User) {
+ var req model.PageReq
+ if err := c.ShouldBind(&req); err != nil {
+ common.ErrorResp(c, err, 400)
+ return
+ }
+ req.Validate()
+ keys, total, err := op.GetSSHPublicKeyByUserId(userObj.ID, req.Page, req.PerPage)
+ if err != nil {
+ common.ErrorResp(c, err, 500, true)
+ return
+ }
+ common.SuccessResp(c, common.PageResp{
+ Content: keys,
+ Total: total,
+ })
+}
diff --git a/server/handles/ssologin.go b/server/handles/ssologin.go
index 70298a9c..62bd4aaa 100644
--- a/server/handles/ssologin.go
+++ b/server/handles/ssologin.go
@@ -1,7 +1,6 @@
package handles
import (
- "encoding/base32"
"encoding/base64"
"errors"
"fmt"
@@ -11,6 +10,8 @@ import (
"strings"
"time"
+ "github.com/Xhofe/go-cache"
+
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/db"
"github.com/alist-org/alist/v3/internal/model"
@@ -21,29 +22,45 @@ import (
"github.com/coreos/go-oidc"
"github.com/gin-gonic/gin"
"github.com/go-resty/resty/v2"
- "github.com/pquerna/otp"
- "github.com/pquerna/otp/totp"
"golang.org/x/oauth2"
"gorm.io/gorm"
)
-var opts = totp.ValidateOpts{
- // state verify won't expire in 30 secs, which is quite enough for the callback
- Period: 30,
- Skew: 1,
- // in some OIDC providers(such as Authelia), state parameter must be at least 8 characters
- Digits: otp.DigitsEight,
- Algorithm: otp.AlgorithmSHA1,
+const stateLength = 16
+const stateExpire = time.Minute * 5
+
+var stateCache = cache.NewMemCache[string](cache.WithShards[string](stateLength))
+
+func _keyState(clientID, state string) string {
+ return fmt.Sprintf("%s_%s", clientID, state)
+}
+
+func generateState(clientID, ip string) string {
+ state := random.String(stateLength)
+ stateCache.Set(_keyState(clientID, state), ip, cache.WithEx[string](stateExpire))
+ return state
+}
+
+func verifyState(clientID, ip, state string) bool {
+ value, ok := stateCache.Get(_keyState(clientID, state))
+ return ok && value == ip
+}
+
+func ssoRedirectUri(c *gin.Context, useCompatibility bool, method string) string {
+ if useCompatibility {
+ return common.GetApiUrl(c.Request) + "/api/auth/" + method
+ } else {
+ return common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + method
+ }
}
func SSOLoginRedirect(c *gin.Context) {
method := c.Query("method")
- usecompatibility := setting.GetBool(conf.SSOCompatibilityMode)
+ useCompatibility := setting.GetBool(conf.SSOCompatibilityMode)
enabled := setting.GetBool(conf.SSOLoginEnabled)
clientId := setting.GetStr(conf.SSOClientId)
platform := setting.GetStr(conf.SSOLoginPlatform)
- var r_url string
- var redirect_uri string
+ var rUrl string
if !enabled {
common.ErrorStrResp(c, "Single sign-on is not enabled", 403)
return
@@ -53,69 +70,52 @@ func SSOLoginRedirect(c *gin.Context) {
common.ErrorStrResp(c, "no method provided", 400)
return
}
- if usecompatibility {
- redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/" + method
- } else {
- redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + method
- }
+ redirectUri := ssoRedirectUri(c, useCompatibility, method)
urlValues.Add("response_type", "code")
- urlValues.Add("redirect_uri", redirect_uri)
+ urlValues.Add("redirect_uri", redirectUri)
urlValues.Add("client_id", clientId)
switch platform {
case "Github":
- r_url = "https://github.com/login/oauth/authorize?"
+ rUrl = "https://github.com/login/oauth/authorize?"
urlValues.Add("scope", "read:user")
case "Microsoft":
- r_url = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize?"
+ rUrl = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize?"
urlValues.Add("scope", "user.read")
urlValues.Add("response_mode", "query")
case "Google":
- r_url = "https://accounts.google.com/o/oauth2/v2/auth?"
+ rUrl = "https://accounts.google.com/o/oauth2/v2/auth?"
urlValues.Add("scope", "https://www.googleapis.com/auth/userinfo.profile")
case "Dingtalk":
- r_url = "https://login.dingtalk.com/oauth2/auth?"
+ rUrl = "https://login.dingtalk.com/oauth2/auth?"
urlValues.Add("scope", "openid")
urlValues.Add("prompt", "consent")
urlValues.Add("response_type", "code")
case "Casdoor":
endpoint := strings.TrimSuffix(setting.GetStr(conf.SSOEndpointName), "/")
- r_url = endpoint + "/login/oauth/authorize?"
+ rUrl = endpoint + "/login/oauth/authorize?"
urlValues.Add("scope", "profile")
urlValues.Add("state", endpoint)
case "OIDC":
- oauth2Config, err := GetOIDCClient(c)
- if err != nil {
- common.ErrorStrResp(c, err.Error(), 400)
- return
- }
- // generate state parameter
- state, err := totp.GenerateCodeCustom(base32.StdEncoding.EncodeToString([]byte(oauth2Config.ClientSecret)), time.Now(), opts)
+ oauth2Config, err := GetOIDCClient(c, useCompatibility, redirectUri, method)
if err != nil {
common.ErrorStrResp(c, err.Error(), 400)
return
}
+ state := generateState(clientId, c.ClientIP())
c.Redirect(http.StatusFound, oauth2Config.AuthCodeURL(state))
return
default:
common.ErrorStrResp(c, "invalid platform", 400)
return
}
- c.Redirect(302, r_url+urlValues.Encode())
+ c.Redirect(302, rUrl+urlValues.Encode())
}
var ssoClient = resty.New().SetRetryCount(3)
-func GetOIDCClient(c *gin.Context) (*oauth2.Config, error) {
- var redirect_uri string
- usecompatibility := setting.GetBool(conf.SSOCompatibilityMode)
- argument := c.Query("method")
- if usecompatibility {
- argument = path.Base(c.Request.URL.Path)
- }
- if usecompatibility {
- redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/" + argument
- } else {
- redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + argument
+func GetOIDCClient(c *gin.Context, useCompatibility bool, redirectUri, method string) (*oauth2.Config, error) {
+ if redirectUri == "" {
+ redirectUri = ssoRedirectUri(c, useCompatibility, method)
}
endpoint := setting.GetStr(conf.SSOEndpointName)
provider, err := oidc.NewProvider(c, endpoint)
@@ -124,16 +124,20 @@ func GetOIDCClient(c *gin.Context) (*oauth2.Config, error) {
}
clientId := setting.GetStr(conf.SSOClientId)
clientSecret := setting.GetStr(conf.SSOClientSecret)
+ extraScopes := []string{}
+ if setting.GetStr(conf.SSOExtraScopes) != "" {
+ extraScopes = strings.Split(setting.GetStr(conf.SSOExtraScopes), " ")
+ }
return &oauth2.Config{
ClientID: clientId,
ClientSecret: clientSecret,
- RedirectURL: redirect_uri,
+ RedirectURL: redirectUri,
// Discovery returns the OAuth2 endpoints.
Endpoint: provider.Endpoint(),
// "openid" is a required scope for OpenID Connect flows.
- Scopes: []string{oidc.ScopeOpenID, "profile"},
+ Scopes: append([]string{oidc.ScopeOpenID, "profile"}, extraScopes...),
}, nil
}
@@ -181,9 +185,9 @@ func parseJWT(p string) ([]byte, error) {
func OIDCLoginCallback(c *gin.Context) {
useCompatibility := setting.GetBool(conf.SSOCompatibilityMode)
- argument := c.Query("method")
+ method := c.Query("method")
if useCompatibility {
- argument = path.Base(c.Request.URL.Path)
+ method = path.Base(c.Request.URL.Path)
}
clientId := setting.GetStr(conf.SSOClientId)
endpoint := setting.GetStr(conf.SSOEndpointName)
@@ -192,18 +196,12 @@ func OIDCLoginCallback(c *gin.Context) {
common.ErrorResp(c, err, 400)
return
}
- oauth2Config, err := GetOIDCClient(c)
+ oauth2Config, err := GetOIDCClient(c, useCompatibility, "", method)
if err != nil {
common.ErrorResp(c, err, 400)
return
}
- // add state verify process
- stateVerification, err := totp.ValidateCustom(c.Query("state"), base32.StdEncoding.EncodeToString([]byte(oauth2Config.ClientSecret)), time.Now(), opts)
- if err != nil {
- common.ErrorResp(c, err, 400)
- return
- }
- if !stateVerification {
+ if !verifyState(clientId, c.ClientIP(), c.Query("state")) {
common.ErrorStrResp(c, "incorrect or expired state parameter", 400)
return
}
@@ -236,7 +234,7 @@ func OIDCLoginCallback(c *gin.Context) {
common.ErrorStrResp(c, "cannot get username from OIDC provider", 400)
return
}
- if argument == "get_sso_id" {
+ if method == "get_sso_id" {
if useCompatibility {
c.Redirect(302, common.GetApiUrl(c.Request)+"/@manage?sso_id="+userID)
return
@@ -252,7 +250,7 @@ func OIDCLoginCallback(c *gin.Context) {
c.Data(200, "text/html; charset=utf-8", []byte(html))
return
}
- if argument == "sso_get_token" {
+ if method == "sso_get_token" {
user, err := db.GetUserBySSOID(userID)
if err != nil {
user, err = autoRegister(userID, userID, err)
diff --git a/server/handles/task.go b/server/handles/task.go
index a8b4d21b..af7974a9 100644
--- a/server/handles/task.go
+++ b/server/handles/task.go
@@ -1,7 +1,10 @@
package handles
import (
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/task"
"math"
+ "time"
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/offline_download/tool"
@@ -12,15 +15,20 @@ import (
)
type TaskInfo struct {
- ID string `json:"id"`
- Name string `json:"name"`
- State tache.State `json:"state"`
- Status string `json:"status"`
- Progress float64 `json:"progress"`
- Error string `json:"error"`
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Creator string `json:"creator"`
+ CreatorRole int `json:"creator_role"`
+ State tache.State `json:"state"`
+ Status string `json:"status"`
+ Progress float64 `json:"progress"`
+ StartTime *time.Time `json:"start_time"`
+ EndTime *time.Time `json:"end_time"`
+ TotalBytes int64 `json:"total_bytes"`
+ Error string `json:"error"`
}
-func getTaskInfo[T tache.TaskWithInfo](task T) TaskInfo {
+func getTaskInfo[T task.TaskExtensionInfo](task T) TaskInfo {
errMsg := ""
if task.GetErr() != nil {
errMsg = task.GetErr().Error()
@@ -30,62 +38,179 @@ func getTaskInfo[T tache.TaskWithInfo](task T) TaskInfo {
if math.IsNaN(progress) {
progress = 100
}
+ creatorName := ""
+ creatorRole := -1
+ if task.GetCreator() != nil {
+ creatorName = task.GetCreator().Username
+ creatorRole = task.GetCreator().Role
+ }
return TaskInfo{
- ID: task.GetID(),
- Name: task.GetName(),
- State: task.GetState(),
- Status: task.GetStatus(),
- Progress: progress,
- Error: errMsg,
+ ID: task.GetID(),
+ Name: task.GetName(),
+ Creator: creatorName,
+ CreatorRole: creatorRole,
+ State: task.GetState(),
+ Status: task.GetStatus(),
+ Progress: progress,
+ StartTime: task.GetStartTime(),
+ EndTime: task.GetEndTime(),
+ TotalBytes: task.GetTotalBytes(),
+ Error: errMsg,
}
}
-func getTaskInfos[T tache.TaskWithInfo](tasks []T) []TaskInfo {
+func getTaskInfos[T task.TaskExtensionInfo](tasks []T) []TaskInfo {
return utils.MustSliceConvert(tasks, getTaskInfo[T])
}
-func taskRoute[T tache.TaskWithInfo](g *gin.RouterGroup, manager *tache.Manager[T]) {
- g.GET("/undone", func(c *gin.Context) {
- common.SuccessResp(c, getTaskInfos(manager.GetByState(tache.StatePending, tache.StateRunning,
- tache.StateCanceling, tache.StateErrored, tache.StateFailing, tache.StateWaitingRetry, tache.StateBeforeRetry)))
- })
- g.GET("/done", func(c *gin.Context) {
- common.SuccessResp(c, getTaskInfos(manager.GetByState(tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)))
- })
- g.POST("/info", func(c *gin.Context) {
- tid := c.Query("tid")
- task, ok := manager.GetByID(tid)
+func argsContains[T comparable](v T, slice ...T) bool {
+ return utils.SliceContains(slice, v)
+}
+
+func getUserInfo(c *gin.Context) (bool, uint, bool) {
+ if user, ok := c.Value("user").(*model.User); ok {
+ return user.IsAdmin(), user.ID, true
+ } else {
+ return false, 0, false
+ }
+}
+
+func getTargetedHandler[T task.TaskExtensionInfo](manager task.Manager[T], callback func(c *gin.Context, task T)) gin.HandlerFunc {
+ return func(c *gin.Context) {
+ isAdmin, uid, ok := getUserInfo(c)
+ if !ok {
+ // should be unreachable: the auth middleware always sets a valid user in the context
+ common.ErrorStrResp(c, "user invalid", 401)
+ return
+ }
+ t, ok := manager.GetByID(c.Query("tid"))
if !ok {
common.ErrorStrResp(c, "task not found", 404)
return
}
+ if !isAdmin && uid != t.GetCreator().ID {
+ // to avoid an attacker using error messages to guess valid TID, return a 404 rather than a 403
+ common.ErrorStrResp(c, "task not found", 404)
+ return
+ }
+ callback(c, t)
+ }
+}
+
+func getBatchHandler[T task.TaskExtensionInfo](manager task.Manager[T], callback func(task T)) gin.HandlerFunc {
+ return func(c *gin.Context) {
+ isAdmin, uid, ok := getUserInfo(c)
+ if !ok {
+ common.ErrorStrResp(c, "user invalid", 401)
+ return
+ }
+ var tids []string
+ if err := c.ShouldBind(&tids); err != nil {
+ common.ErrorStrResp(c, "invalid request format", 400)
+ return
+ }
+ retErrs := make(map[string]string)
+ for _, tid := range tids {
+ t, ok := manager.GetByID(tid)
+ if !ok || (!isAdmin && uid != t.GetCreator().ID) {
+ retErrs[tid] = "task not found"
+ continue
+ }
+ callback(t)
+ }
+ common.SuccessResp(c, retErrs)
+ }
+}
+
+func taskRoute[T task.TaskExtensionInfo](g *gin.RouterGroup, manager task.Manager[T]) {
+ g.GET("/undone", func(c *gin.Context) {
+ isAdmin, uid, ok := getUserInfo(c)
+ if !ok {
+ // should be unreachable: the auth middleware always sets a valid user in the context
+ common.ErrorStrResp(c, "user invalid", 401)
+ return
+ }
+ common.SuccessResp(c, getTaskInfos(manager.GetByCondition(func(task T) bool {
+ // avoid directly passing the user object into the function to reduce closure size
+ return (isAdmin || uid == task.GetCreator().ID) &&
+ argsContains(task.GetState(), tache.StatePending, tache.StateRunning, tache.StateCanceling,
+ tache.StateErrored, tache.StateFailing, tache.StateWaitingRetry, tache.StateBeforeRetry)
+ })))
+ })
+ g.GET("/done", func(c *gin.Context) {
+ isAdmin, uid, ok := getUserInfo(c)
+ if !ok {
+ // should be unreachable: the auth middleware always sets a valid user in the context
+ common.ErrorStrResp(c, "user invalid", 401)
+ return
+ }
+ common.SuccessResp(c, getTaskInfos(manager.GetByCondition(func(task T) bool {
+ return (isAdmin || uid == task.GetCreator().ID) &&
+ argsContains(task.GetState(), tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
+ })))
+ })
+ g.POST("/info", getTargetedHandler(manager, func(c *gin.Context, task T) {
common.SuccessResp(c, getTaskInfo(task))
- })
- g.POST("/cancel", func(c *gin.Context) {
- tid := c.Query("tid")
- manager.Cancel(tid)
+ }))
+ g.POST("/cancel", getTargetedHandler(manager, func(c *gin.Context, task T) {
+ manager.Cancel(task.GetID())
common.SuccessResp(c)
- })
- g.POST("/delete", func(c *gin.Context) {
- tid := c.Query("tid")
- manager.Remove(tid)
+ }))
+ g.POST("/delete", getTargetedHandler(manager, func(c *gin.Context, task T) {
+ manager.Remove(task.GetID())
common.SuccessResp(c)
- })
- g.POST("/retry", func(c *gin.Context) {
- tid := c.Query("tid")
- manager.Retry(tid)
+ }))
+ g.POST("/retry", getTargetedHandler(manager, func(c *gin.Context, task T) {
+ manager.Retry(task.GetID())
common.SuccessResp(c)
- })
+ }))
+ g.POST("/cancel_some", getBatchHandler(manager, func(task T) {
+ manager.Cancel(task.GetID())
+ }))
+ g.POST("/delete_some", getBatchHandler(manager, func(task T) {
+ manager.Remove(task.GetID())
+ }))
+ g.POST("/retry_some", getBatchHandler(manager, func(task T) {
+ manager.Retry(task.GetID())
+ }))
g.POST("/clear_done", func(c *gin.Context) {
- manager.RemoveByState(tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
+ isAdmin, uid, ok := getUserInfo(c)
+ if !ok {
+ // should be unreachable: the auth middleware always sets a valid user in the context
+ common.ErrorStrResp(c, "user invalid", 401)
+ return
+ }
+ manager.RemoveByCondition(func(task T) bool {
+ return (isAdmin || uid == task.GetCreator().ID) &&
+ argsContains(task.GetState(), tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
+ })
common.SuccessResp(c)
})
g.POST("/clear_succeeded", func(c *gin.Context) {
- manager.RemoveByState(tache.StateSucceeded)
+ isAdmin, uid, ok := getUserInfo(c)
+ if !ok {
+ // should be unreachable: the auth middleware always sets a valid user in the context
+ common.ErrorStrResp(c, "user invalid", 401)
+ return
+ }
+ manager.RemoveByCondition(func(task T) bool {
+ return (isAdmin || uid == task.GetCreator().ID) && task.GetState() == tache.StateSucceeded
+ })
common.SuccessResp(c)
})
g.POST("/retry_failed", func(c *gin.Context) {
- manager.RetryAllFailed()
+ isAdmin, uid, ok := getUserInfo(c)
+ if !ok {
+ // should be unreachable: the auth middleware always sets a valid user in the context
+ common.ErrorStrResp(c, "user invalid", 401)
+ return
+ }
+ tasks := manager.GetByCondition(func(task T) bool {
+ return (isAdmin || uid == task.GetCreator().ID) && task.GetState() == tache.StateFailed
+ })
+ for _, t := range tasks {
+ manager.Retry(t.GetID())
+ }
common.SuccessResp(c)
})
}
@@ -95,4 +220,6 @@ func SetupTaskRoute(g *gin.RouterGroup) {
taskRoute(g.Group("/copy"), fs.CopyTaskManager)
taskRoute(g.Group("/offline_download"), tool.DownloadTaskManager)
taskRoute(g.Group("/offline_download_transfer"), tool.TransferTaskManager)
+ taskRoute(g.Group("/decompress"), fs.ArchiveDownloadTaskManager)
+ taskRoute(g.Group("/decompress_upload"), fs.ArchiveContentUploadTaskManager)
}
diff --git a/server/handles/webauthn.go b/server/handles/webauthn.go
index 1bd1884e..c6a7650c 100644
--- a/server/handles/webauthn.go
+++ b/server/handles/webauthn.go
@@ -207,6 +207,10 @@ func DeleteAuthnLogin(c *gin.Context) {
return
}
err = db.RemoveAuthn(user, req.ID)
+ if err != nil {
+ common.ErrorResp(c, err, 400)
+ return
+ }
err = op.DelUserCache(user.Username)
if err != nil {
common.ErrorResp(c, err, 400)
diff --git a/server/middlewares/auth.go b/server/middlewares/auth.go
index 14f186be..d65d1ad6 100644
--- a/server/middlewares/auth.go
+++ b/server/middlewares/auth.go
@@ -127,6 +127,16 @@ func Authn(c *gin.Context) {
c.Next()
}
+func AuthNotGuest(c *gin.Context) {
+ user := c.MustGet("user").(*model.User)
+ if user.IsGuest() {
+ common.ErrorStrResp(c, "You are a guest", 403)
+ c.Abort()
+ } else {
+ c.Next()
+ }
+}
+
func AuthAdmin(c *gin.Context) {
user := c.MustGet("user").(*model.User)
if !user.IsAdmin() {
diff --git a/server/middlewares/down.go b/server/middlewares/down.go
index 05e9dc85..d015672d 100644
--- a/server/middlewares/down.go
+++ b/server/middlewares/down.go
@@ -9,35 +9,36 @@ import (
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
- "github.com/alist-org/alist/v3/internal/sign"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
)
-func Down(c *gin.Context) {
- rawPath := parsePath(c.Param("path"))
- c.Set("path", rawPath)
- meta, err := op.GetNearestMeta(rawPath)
- if err != nil {
- if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
- common.ErrorResp(c, err, 500, true)
- return
- }
- }
- c.Set("meta", meta)
- // verify sign
- if needSign(meta, rawPath) {
- s := c.Query("sign")
- err = sign.Verify(rawPath, strings.TrimSuffix(s, "/"))
+func Down(verifyFunc func(string, string) error) func(c *gin.Context) {
+ return func(c *gin.Context) {
+ rawPath := parsePath(c.Param("path"))
+ c.Set("path", rawPath)
+ meta, err := op.GetNearestMeta(rawPath)
if err != nil {
- common.ErrorResp(c, err, 401)
- c.Abort()
- return
+ if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
+ common.ErrorResp(c, err, 500, true)
+ return
+ }
}
+ c.Set("meta", meta)
+ // verify sign
+ if needSign(meta, rawPath) {
+ s := c.Query("sign")
+ err = verifyFunc(rawPath, strings.TrimSuffix(s, "/"))
+ if err != nil {
+ common.ErrorResp(c, err, 401)
+ c.Abort()
+ return
+ }
+ }
+ c.Next()
}
- c.Next()
}
// TODO: implement
diff --git a/server/middlewares/limit.go b/server/middlewares/limit.go
index 44c079b3..2ccee950 100644
--- a/server/middlewares/limit.go
+++ b/server/middlewares/limit.go
@@ -1,7 +1,9 @@
package middlewares
import (
+ "github.com/alist-org/alist/v3/internal/stream"
"github.com/gin-gonic/gin"
+ "io"
)
func MaxAllowed(n int) gin.HandlerFunc {
@@ -14,3 +16,37 @@ func MaxAllowed(n int) gin.HandlerFunc {
c.Next()
}
}
+
+func UploadRateLimiter(limiter stream.Limiter) gin.HandlerFunc {
+ return func(c *gin.Context) {
+ c.Request.Body = &stream.RateLimitReader{
+ Reader: c.Request.Body,
+ Limiter: limiter,
+ Ctx: c,
+ }
+ c.Next()
+ }
+}
+
+type ResponseWriterWrapper struct {
+ gin.ResponseWriter
+ WrapWriter io.Writer
+}
+
+func (w *ResponseWriterWrapper) Write(p []byte) (n int, err error) {
+ return w.WrapWriter.Write(p)
+}
+
+func DownloadRateLimiter(limiter stream.Limiter) gin.HandlerFunc {
+ return func(c *gin.Context) {
+ c.Writer = &ResponseWriterWrapper{
+ ResponseWriter: c.Writer,
+ WrapWriter: &stream.RateLimitWriter{
+ Writer: c.Writer,
+ Limiter: limiter,
+ Ctx: c,
+ },
+ }
+ c.Next()
+ }
+}
diff --git a/server/router.go b/server/router.go
index 5be593f7..09a0bb44 100644
--- a/server/router.go
+++ b/server/router.go
@@ -4,6 +4,8 @@ import (
"github.com/alist-org/alist/v3/cmd/flags"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/message"
+ "github.com/alist-org/alist/v3/internal/sign"
+ "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
"github.com/alist-org/alist/v3/server/handles"
@@ -38,10 +40,19 @@ func Init(e *gin.Engine) {
WebDav(g.Group("/dav"))
S3(g.Group("/s3"))
- g.GET("/d/*path", middlewares.Down, handles.Down)
- g.GET("/p/*path", middlewares.Down, handles.Proxy)
- g.HEAD("/d/*path", middlewares.Down, handles.Down)
- g.HEAD("/p/*path", middlewares.Down, handles.Proxy)
+ downloadLimiter := middlewares.DownloadRateLimiter(stream.ClientDownloadLimit)
+ signCheck := middlewares.Down(sign.Verify)
+ g.GET("/d/*path", signCheck, downloadLimiter, handles.Down)
+ g.GET("/p/*path", signCheck, downloadLimiter, handles.Proxy)
+ g.HEAD("/d/*path", signCheck, handles.Down)
+ g.HEAD("/p/*path", signCheck, handles.Proxy)
+ archiveSignCheck := middlewares.Down(sign.VerifyArchive)
+ g.GET("/ad/*path", archiveSignCheck, downloadLimiter, handles.ArchiveDown)
+ g.GET("/ap/*path", archiveSignCheck, downloadLimiter, handles.ArchiveProxy)
+ g.GET("/ae/*path", archiveSignCheck, downloadLimiter, handles.ArchiveInternalExtract)
+ g.HEAD("/ad/*path", archiveSignCheck, handles.ArchiveDown)
+ g.HEAD("/ap/*path", archiveSignCheck, handles.ArchiveProxy)
+ g.HEAD("/ae/*path", archiveSignCheck, handles.ArchiveInternalExtract)
api := g.Group("/api")
auth := api.Group("", middlewares.Auth)
@@ -52,6 +63,9 @@ func Init(e *gin.Engine) {
api.POST("/auth/login/ldap", handles.LoginLdap)
auth.GET("/me", handles.CurrentUser)
auth.POST("/me/update", handles.UpdateCurrent)
+ auth.GET("/me/sshkey/list", handles.ListMyPublicKey)
+ auth.POST("/me/sshkey/add", handles.AddMyPublicKey)
+ auth.POST("/me/sshkey/delete", handles.DeleteMyPublicKey)
auth.POST("/auth/2fa/generate", handles.Generate2FA)
auth.POST("/auth/2fa/verify", handles.Verify2FA)
auth.GET("/auth/logout", handles.LogOut)
@@ -62,11 +76,11 @@ func Init(e *gin.Engine) {
api.GET("/auth/get_sso_id", handles.SSOLoginCallback)
api.GET("/auth/sso_get_token", handles.SSOLoginCallback)
- //webauthn
+ // webauthn
+ api.GET("/authn/webauthn_begin_login", handles.BeginAuthnLogin)
+ api.POST("/authn/webauthn_finish_login", handles.FinishAuthnLogin)
webauthn.GET("/webauthn_begin_registration", handles.BeginAuthnRegistration)
webauthn.POST("/webauthn_finish_registration", handles.FinishAuthnRegistration)
- webauthn.GET("/webauthn_begin_login", handles.BeginAuthnLogin)
- webauthn.POST("/webauthn_finish_login", handles.FinishAuthnLogin)
webauthn.POST("/delete_authn", handles.DeleteAuthnLogin)
webauthn.GET("/getcredentials", handles.GetAuthnCredentials)
@@ -74,8 +88,10 @@ func Init(e *gin.Engine) {
public := api.Group("/public")
public.Any("/settings", handles.PublicSettings)
public.Any("/offline_download_tools", handles.OfflineDownloadTools)
+ public.Any("/archive_extensions", handles.ArchiveExtensions)
_fs(auth.Group("/fs"))
+ _task(auth.Group("/task", middlewares.AuthNotGuest))
admin(auth.Group("/admin", middlewares.AuthAdmin))
if flags.Debug || flags.Dev {
debug(g.Group("/debug"))
@@ -101,6 +117,8 @@ func admin(g *gin.RouterGroup) {
user.POST("/cancel_2fa", handles.Cancel2FAById)
user.POST("/delete", handles.DeleteUser)
user.POST("/del_cache", handles.DelUserCache)
+ user.GET("/sshkey/list", handles.ListPublicKeys)
+ user.POST("/sshkey/delete", handles.DeletePublicKey)
storage := g.Group("/storage")
storage.GET("/list", handles.ListStorages)
@@ -125,9 +143,13 @@ func admin(g *gin.RouterGroup) {
setting.POST("/reset_token", handles.ResetToken)
setting.POST("/set_aria2", handles.SetAria2)
setting.POST("/set_qbit", handles.SetQbittorrent)
+ setting.POST("/set_transmission", handles.SetTransmission)
+ setting.POST("/set_115", handles.Set115)
+ setting.POST("/set_pikpak", handles.SetPikPak)
+ setting.POST("/set_thunder", handles.SetThunder)
- task := g.Group("/task")
- handles.SetupTaskRoute(task)
+ // retain /admin/task API to ensure compatibility with legacy automation scripts
+ _task(g.Group("/task"))
ms := g.Group("/message")
ms.POST("/get", message.HttpInstance.GetHandle)
@@ -156,17 +178,27 @@ func _fs(g *gin.RouterGroup) {
g.POST("/copy", handles.FsCopy)
g.POST("/remove", handles.FsRemove)
g.POST("/remove_empty_directory", handles.FsRemoveEmptyDirectory)
- g.PUT("/put", middlewares.FsUp, handles.FsStream)
- g.PUT("/form", middlewares.FsUp, handles.FsForm)
+ uploadLimiter := middlewares.UploadRateLimiter(stream.ClientUploadLimit)
+ g.PUT("/put", middlewares.FsUp, uploadLimiter, handles.FsStream)
+ g.PUT("/form", middlewares.FsUp, uploadLimiter, handles.FsForm)
g.POST("/link", middlewares.AuthAdmin, handles.Link)
- //g.POST("/add_aria2", handles.AddOfflineDownload)
- //g.POST("/add_qbit", handles.AddQbittorrent)
+ // g.POST("/add_aria2", handles.AddOfflineDownload)
+ // g.POST("/add_qbit", handles.AddQbittorrent)
+ // g.POST("/add_transmission", handles.SetTransmission)
g.POST("/add_offline_download", handles.AddOfflineDownload)
+ a := g.Group("/archive")
+ a.Any("/meta", handles.FsArchiveMeta)
+ a.Any("/list", handles.FsArchiveList)
+ a.POST("/decompress", handles.FsArchiveDecompress)
+}
+
+func _task(g *gin.RouterGroup) {
+ handles.SetupTaskRoute(g)
}
func Cors(r *gin.Engine) {
config := cors.DefaultConfig()
- //config.AllowAllOrigins = true
+ // config.AllowAllOrigins = true
config.AllowOrigins = conf.Conf.Cors.AllowOrigins
config.AllowHeaders = conf.Conf.Cors.AllowHeaders
config.AllowMethods = conf.Conf.Cors.AllowMethods
diff --git a/server/s3/backend.go b/server/s3/backend.go
index e0cfd967..a1e99044 100644
--- a/server/s3/backend.go
+++ b/server/s3/backend.go
@@ -6,13 +6,14 @@ import (
"context"
"encoding/hex"
"fmt"
- "github.com/pkg/errors"
"io"
"path"
"strings"
"sync"
"time"
+ "github.com/pkg/errors"
+
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/model"
@@ -173,20 +174,28 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
if link.RangeReadCloser == nil && link.MFile == nil && len(link.URL) == 0 {
return nil, fmt.Errorf("the remote storage driver need to be enhanced to support s3")
}
- remoteFileSize := file.GetSize()
- remoteClosers := utils.EmptyClosers()
- rangeReaderFunc := func(ctx context.Context, start, length int64) (io.ReadCloser, error) {
+
+ var rdr io.ReadCloser
+ length := int64(-1)
+ start := int64(0)
+ if rnge != nil {
+ start, length = rnge.Start, rnge.Length
+ }
+ // see server/common/proxy.go for the reference implementation of this range-read logic
+ if link.MFile != nil {
+ _, err := link.MFile.Seek(start, io.SeekStart)
+ if err != nil {
+ return nil, err
+ }
+ rdr = link.MFile
+ } else {
+ remoteFileSize := file.GetSize()
if length >= 0 && start+length >= remoteFileSize {
length = -1
}
rrc := link.RangeReadCloser
if len(link.URL) > 0 {
-
- rangedRemoteLink := &model.Link{
- URL: link.URL,
- Header: link.Header,
- }
- var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
+ var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, link)
if err != nil {
return nil, err
}
@@ -194,35 +203,12 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
}
if rrc != nil {
remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: start, Length: length})
- remoteClosers.AddClosers(rrc.GetClosers())
if err != nil {
return nil, err
}
- return remoteReader, nil
- }
- if link.MFile != nil {
- _, err := link.MFile.Seek(start, io.SeekStart)
- if err != nil {
- return nil, err
- }
- //remoteClosers.Add(remoteLink.MFile)
- //keep reuse same MFile and close at last.
- remoteClosers.Add(link.MFile)
- return io.NopCloser(link.MFile), nil
- }
- return nil, errs.NotSupport
- }
-
- var rdr io.ReadCloser
- if rnge != nil {
- rdr, err = rangeReaderFunc(ctx, rnge.Start, rnge.Length)
- if err != nil {
- return nil, err
- }
- } else {
- rdr, err = rangeReaderFunc(ctx, 0, -1)
- if err != nil {
- return nil, err
+ rdr = utils.ReadCloser{Reader: remoteReader, Closer: rrc}
+ } else {
+ return nil, errs.NotSupport
}
}
diff --git a/server/sftp.go b/server/sftp.go
new file mode 100644
index 00000000..42c676e8
--- /dev/null
+++ b/server/sftp.go
@@ -0,0 +1,140 @@
+package server
+
+import (
+ "context"
+ "github.com/KirCute/sftpd-alist"
+ "github.com/alist-org/alist/v3/internal/conf"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/internal/op"
+ "github.com/alist-org/alist/v3/internal/setting"
+ "github.com/alist-org/alist/v3/pkg/utils"
+ "github.com/alist-org/alist/v3/server/ftp"
+ "github.com/alist-org/alist/v3/server/sftp"
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/ssh"
+ "net/http"
+ "time"
+)
+
+type SftpDriver struct {
+ proxyHeader *http.Header
+ config *sftpd.Config
+}
+
+func NewSftpDriver() (*SftpDriver, error) {
+ sftp.InitHostKey()
+ header := &http.Header{}
+ header.Add("User-Agent", setting.GetStr(conf.FTPProxyUserAgent))
+ return &SftpDriver{
+ proxyHeader: header,
+ }, nil
+}
+
+func (d *SftpDriver) GetConfig() *sftpd.Config {
+ if d.config != nil {
+ return d.config
+ }
+ serverConfig := ssh.ServerConfig{
+ NoClientAuth: true,
+ NoClientAuthCallback: d.NoClientAuth,
+ PasswordCallback: d.PasswordAuth,
+ PublicKeyCallback: d.PublicKeyAuth,
+ AuthLogCallback: d.AuthLogCallback,
+ BannerCallback: d.GetBanner,
+ }
+ for _, k := range sftp.SSHSigners {
+ serverConfig.AddHostKey(k)
+ }
+ d.config = &sftpd.Config{
+ ServerConfig: serverConfig,
+ HostPort: conf.Conf.SFTP.Listen,
+ ErrorLogFunc: utils.Log.Error,
+ //DebugLogFunc: utils.Log.Debugf,
+ }
+ return d.config
+}
+
+func (d *SftpDriver) GetFileSystem(sc *ssh.ServerConn) (sftpd.FileSystem, error) {
+ userObj, err := op.GetUserByName(sc.User())
+ if err != nil {
+ return nil, err
+ }
+ ctx := context.Background()
+ ctx = context.WithValue(ctx, "user", userObj)
+ ctx = context.WithValue(ctx, "meta_pass", "")
+ ctx = context.WithValue(ctx, "client_ip", sc.RemoteAddr().String())
+ ctx = context.WithValue(ctx, "proxy_header", d.proxyHeader)
+ return &sftp.DriverAdapter{FtpDriver: ftp.NewAferoAdapter(ctx)}, nil
+}
+
+func (d *SftpDriver) Close() {
+}
+
+func (d *SftpDriver) NoClientAuth(conn ssh.ConnMetadata) (*ssh.Permissions, error) {
+ if conn.User() != "guest" {
+ return nil, errors.New("only guest is allowed to login without authorization")
+ }
+ guest, err := op.GetGuest()
+ if err != nil {
+ return nil, err
+ }
+ if guest.Disabled || !guest.CanFTPAccess() {
+ return nil, errors.New("user is not allowed to access via SFTP")
+ }
+ return nil, nil
+}
+
+func (d *SftpDriver) PasswordAuth(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
+ userObj, err := op.GetUserByName(conn.User())
+ if err != nil {
+ return nil, err
+ }
+ if userObj.Disabled || !userObj.CanFTPAccess() {
+ return nil, errors.New("user is not allowed to access via SFTP")
+ }
+ passHash := model.StaticHash(string(password))
+ if err = userObj.ValidatePwdStaticHash(passHash); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func (d *SftpDriver) PublicKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
+ userObj, err := op.GetUserByName(conn.User())
+ if err != nil {
+ return nil, err
+ }
+ if userObj.Disabled || !userObj.CanFTPAccess() {
+ return nil, errors.New("user is not allowed to access via SFTP")
+ }
+ keys, _, err := op.GetSSHPublicKeyByUserId(userObj.ID, 1, -1)
+ if err != nil {
+ return nil, err
+ }
+ marshal := string(key.Marshal())
+ for _, sk := range keys {
+ if marshal != sk.KeyStr {
+ pubKey, _, _, _, e := ssh.ParseAuthorizedKey([]byte(sk.KeyStr))
+ if e != nil || marshal != string(pubKey.Marshal()) {
+ continue
+ }
+ }
+ sk.LastUsedTime = time.Now()
+ _ = op.UpdateSSHPublicKey(&sk)
+ return nil, nil
+ }
+ return nil, errors.New("public key refused")
+}
+
+func (d *SftpDriver) AuthLogCallback(conn ssh.ConnMetadata, method string, err error) {
+ ip := conn.RemoteAddr().String()
+ if err == nil {
+ utils.Log.Infof("[SFTP] %s(%s) logged in via %s", conn.User(), ip, method)
+ } else if method != "none" {
+ utils.Log.Infof("[SFTP] %s(%s) tries logging in via %s but with error: %s", conn.User(), ip, method, err)
+ }
+}
+
+func (d *SftpDriver) GetBanner(_ ssh.ConnMetadata) string {
+ return setting.GetStr(conf.Announcement)
+}
diff --git a/server/sftp/const.go b/server/sftp/const.go
new file mode 100644
index 00000000..58bfe382
--- /dev/null
+++ b/server/sftp/const.go
@@ -0,0 +1,11 @@
+package sftp
+
+// From leffss/sftpd
+const (
+ SSH_FXF_READ = 0x00000001
+ SSH_FXF_WRITE = 0x00000002
+ SSH_FXF_APPEND = 0x00000004
+ SSH_FXF_CREAT = 0x00000008
+ SSH_FXF_TRUNC = 0x00000010
+ SSH_FXF_EXCL = 0x00000020
+)
diff --git a/server/sftp/hostkey.go b/server/sftp/hostkey.go
new file mode 100644
index 00000000..0db103dd
--- /dev/null
+++ b/server/sftp/hostkey.go
@@ -0,0 +1,105 @@
+package sftp
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "github.com/alist-org/alist/v3/cmd/flags"
+ "github.com/alist-org/alist/v3/pkg/utils"
+ "golang.org/x/crypto/ssh"
+ "os"
+ "path/filepath"
+)
+
+var SSHSigners []ssh.Signer
+
+func InitHostKey() {
+ if SSHSigners != nil {
+ return
+ }
+ sshPath := filepath.Join(flags.DataDir, "ssh")
+ if !utils.Exists(sshPath) {
+ err := utils.CreateNestedDirectory(sshPath)
+ if err != nil {
+ utils.Log.Fatalf("failed to create ssh directory: %+v", err)
+ return
+ }
+ }
+ SSHSigners = make([]ssh.Signer, 0, 4)
+ if rsaKey, ok := LoadOrGenerateRSAHostKey(sshPath); ok {
+ SSHSigners = append(SSHSigners, rsaKey)
+ }
+ // TODO: add host keys for other signature algorithms (e.g. ed25519, ecdsa)
+}
+
+func LoadOrGenerateRSAHostKey(parentDir string) (ssh.Signer, bool) {
+ privateKeyPath := filepath.Join(parentDir, "ssh_host_rsa_key")
+ publicKeyPath := filepath.Join(parentDir, "ssh_host_rsa_key.pub")
+ privateKeyBytes, err := os.ReadFile(privateKeyPath)
+ if err == nil {
+ var privateKey *rsa.PrivateKey
+ privateKey, err = rsaDecodePrivateKey(privateKeyBytes)
+ if err == nil {
+ var ret ssh.Signer
+ ret, err = ssh.NewSignerFromKey(privateKey)
+ if err == nil {
+ return ret, true
+ }
+ }
+ }
+ _ = os.Remove(privateKeyPath)
+ _ = os.Remove(publicKeyPath)
+ privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
+ if err != nil {
+ utils.Log.Fatalf("failed to generate RSA private key: %+v", err)
+ return nil, false
+ }
+ publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey)
+ if err != nil {
+ utils.Log.Fatalf("failed to generate RSA public key: %+v", err)
+ return nil, false
+ }
+ ret, err := ssh.NewSignerFromKey(privateKey)
+ if err != nil {
+ utils.Log.Fatalf("failed to generate RSA signer: %+v", err)
+ return nil, false
+ }
+ privateBytes := rsaEncodePrivateKey(privateKey)
+ publicBytes := ssh.MarshalAuthorizedKey(publicKey)
+ err = os.WriteFile(privateKeyPath, privateBytes, 0600)
+ if err != nil {
+ utils.Log.Fatalf("failed to write RSA private key to file: %+v", err)
+ return nil, false
+ }
+ err = os.WriteFile(publicKeyPath, publicBytes, 0644)
+ if err != nil {
+ _ = os.Remove(privateKeyPath)
+ utils.Log.Fatalf("failed to write RSA public key to file: %+v", err)
+ return nil, false
+ }
+ return ret, true
+}
+
+func rsaEncodePrivateKey(privateKey *rsa.PrivateKey) []byte {
+ privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)
+ privateBlock := &pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Headers: nil,
+ Bytes: privateKeyBytes,
+ }
+ return pem.EncodeToMemory(privateBlock)
+}
+
+func rsaDecodePrivateKey(bytes []byte) (*rsa.PrivateKey, error) {
+ block, _ := pem.Decode(bytes)
+ if block == nil {
+ return nil, fmt.Errorf("failed to parse PEM block containing the key")
+ }
+ privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ return privateKey, nil
+}
diff --git a/server/sftp/sftp.go b/server/sftp/sftp.go
new file mode 100644
index 00000000..1ceb3f59
--- /dev/null
+++ b/server/sftp/sftp.go
@@ -0,0 +1,123 @@
+package sftp
+
+import (
+ "github.com/KirCute/sftpd-alist"
+ "github.com/alist-org/alist/v3/internal/errs"
+ "github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/pkg/utils"
+ "github.com/alist-org/alist/v3/server/ftp"
+ "os"
+)
+
// DriverAdapter implements the sftpd-alist filesystem callbacks by
// delegating to the existing FTP Afero adapter, so the SFTP and FTP
// servers share a single storage implementation.
type DriverAdapter struct {
	FtpDriver *ftp.AferoAdapter
}
+
// OpenFile is a deliberate stub: file transfers are created through
// GetHandle instead, so this always reports errs.NotImplement.
func (s *DriverAdapter) OpenFile(_ string, _ uint32, _ *sftpd.Attr) (sftpd.File, error) {
	// See also GetHandle
	return nil, errs.NotImplement
}
+
// OpenDir is a deliberate stub: directory listings are served through
// ReadDir instead, so this always reports errs.NotImplement.
func (s *DriverAdapter) OpenDir(_ string) (sftpd.Dir, error) {
	// See also GetHandle
	return nil, errs.NotImplement
}
+
// Remove deletes the object at name via the FTP adapter. Rmdir shares
// this path, so both files and directories are removed here.
func (s *DriverAdapter) Remove(name string) error {
	return s.FtpDriver.Remove(name)
}
+
+func (s *DriverAdapter) Rename(old, new string, _ uint32) error {
+ return s.FtpDriver.Rename(old, new)
+}
+
// Mkdir creates a directory at name, forwarding only the permission
// bits from attr; the remaining SFTP attributes are ignored.
func (s *DriverAdapter) Mkdir(name string, attr *sftpd.Attr) error {
	return s.FtpDriver.Mkdir(name, attr.Mode)
}
+
// Rmdir removes the directory at name by delegating to Remove, which
// handles files and directories through the same FTP-adapter call.
func (s *DriverAdapter) Rmdir(name string) error {
	return s.Remove(name)
}
+
// Stat returns the SFTP attributes of the object at name. The boolean
// parameter (presumably the follow-symlinks flag — confirm against the
// sftpd interface) is ignored.
func (s *DriverAdapter) Stat(name string, _ bool) (*sftpd.Attr, error) {
	stat, err := s.FtpDriver.Stat(name)
	if err != nil {
		return nil, err
	}
	return fileInfoToSftpAttr(stat), nil
}
+
// SetStat rejects all attribute changes with errs.NotSupport.
func (s *DriverAdapter) SetStat(_ string, _ *sftpd.Attr) error {
	return errs.NotSupport
}
+
// ReadLink reports errs.NotSupport: symbolic links are not supported.
func (s *DriverAdapter) ReadLink(_ string) (string, error) {
	return "", errs.NotSupport
}
+
// CreateLink reports errs.NotSupport: link creation is not supported.
func (s *DriverAdapter) CreateLink(_, _ string, _ uint32) error {
	return errs.NotSupport
}
+
// RealPath canonicalizes a client-supplied path with
// utils.FixAndCleanPath; it never returns an error.
func (s *DriverAdapter) RealPath(path string) (string, error) {
	return utils.FixAndCleanPath(path), nil
}
+
// GetHandle opens a file transfer for name at the given byte offset,
// translating the SFTP protocol open flags into os.OpenFile-style mode
// bits. The attr argument is ignored.
// NOTE(review): offset is narrowed uint64 -> int64; offsets above
// math.MaxInt64 would go negative, though no realistic client sends them.
func (s *DriverAdapter) GetHandle(name string, flags uint32, _ *sftpd.Attr, offset uint64) (sftpd.FileTransfer, error) {
	return s.FtpDriver.GetHandle(name, sftpFlagToOpenMode(flags), int64(offset))
}
+
+func (s *DriverAdapter) ReadDir(name string) ([]sftpd.NamedAttr, error) {
+ dir, err := s.FtpDriver.ReadDir(name)
+ if err != nil {
+ return nil, err
+ }
+ ret := make([]sftpd.NamedAttr, len(dir))
+ for i, d := range dir {
+ ret[i] = *fileInfoToSftpNamedAttr(d)
+ }
+ return ret, nil
+}
+
+// From leffss/sftpd
+func sftpFlagToOpenMode(flags uint32) int {
+ mode := 0
+ if (flags & SSH_FXF_READ) != 0 {
+ mode |= os.O_RDONLY
+ }
+ if (flags & SSH_FXF_WRITE) != 0 {
+ mode |= os.O_WRONLY
+ }
+ if (flags & SSH_FXF_APPEND) != 0 {
+ mode |= os.O_APPEND
+ }
+ if (flags & SSH_FXF_CREAT) != 0 {
+ mode |= os.O_CREATE
+ }
+ if (flags & SSH_FXF_TRUNC) != 0 {
+ mode |= os.O_TRUNC
+ }
+ if (flags & SSH_FXF_EXCL) != 0 {
+ mode |= os.O_EXCL
+ }
+ return mode
+}
+
+func fileInfoToSftpAttr(stat os.FileInfo) *sftpd.Attr {
+ ret := &sftpd.Attr{}
+ ret.Flags |= sftpd.ATTR_SIZE
+ ret.Size = uint64(stat.Size())
+ ret.Flags |= sftpd.ATTR_MODE
+ ret.Mode = stat.Mode()
+ ret.Flags |= sftpd.ATTR_TIME
+ ret.ATime = stat.Sys().(model.Obj).CreateTime()
+ ret.MTime = stat.ModTime()
+ return ret
+}
+
+func fileInfoToSftpNamedAttr(stat os.FileInfo) *sftpd.NamedAttr {
+ return &sftpd.NamedAttr{
+ Name: stat.Name(),
+ Attr: *fileInfoToSftpAttr(stat),
+ }
+}
diff --git a/server/static/static.go b/server/static/static.go
index ec16014c..d5d6ff68 100644
--- a/server/static/static.go
+++ b/server/static/static.go
@@ -102,6 +102,10 @@ func Static(r *gin.RouterGroup, noRoute func(handlers ...gin.HandlerFunc)) {
}
noRoute(func(c *gin.Context) {
+ if c.Request.Method != "GET" && c.Request.Method != "POST" {
+ c.Status(405)
+ return
+ }
c.Header("Content-Type", "text/html")
c.Status(200)
if strings.HasPrefix(c.Request.URL.Path, "/@manage") {
diff --git a/server/webdav.go b/server/webdav.go
index 2b5c9618..a735e285 100644
--- a/server/webdav.go
+++ b/server/webdav.go
@@ -3,6 +3,8 @@ package server
import (
"context"
"crypto/subtle"
+ "github.com/alist-org/alist/v3/internal/stream"
+ "github.com/alist-org/alist/v3/server/middlewares"
"net/http"
"path"
"strings"
@@ -11,7 +13,6 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting"
- "github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/webdav"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
@@ -28,8 +29,10 @@ func WebDav(dav *gin.RouterGroup) {
},
}
dav.Use(WebDAVAuth)
- dav.Any("/*path", ServeWebDAV)
- dav.Any("", ServeWebDAV)
+ uploadLimiter := middlewares.UploadRateLimiter(stream.ClientUploadLimit)
+ downloadLimiter := middlewares.DownloadRateLimiter(stream.ClientDownloadLimit)
+ dav.Any("/*path", uploadLimiter, downloadLimiter, ServeWebDAV)
+ dav.Any("", uploadLimiter, downloadLimiter, ServeWebDAV)
dav.Handle("PROPFIND", "/*path", ServeWebDAV)
dav.Handle("PROPFIND", "", ServeWebDAV)
dav.Handle("MKCOL", "/*path", ServeWebDAV)
@@ -99,12 +102,27 @@ func WebDAVAuth(c *gin.Context) {
c.Abort()
return
}
- if !user.CanWebdavManage() && utils.SliceContains([]string{"PUT", "DELETE", "PROPPATCH", "MKCOL", "COPY", "MOVE"}, c.Request.Method) {
- if c.Request.Method == "OPTIONS" {
- c.Set("user", guest)
- c.Next()
- return
- }
+ if (c.Request.Method == "PUT" || c.Request.Method == "MKCOL") && (!user.CanWebdavManage() || !user.CanWrite()) {
+ c.Status(http.StatusForbidden)
+ c.Abort()
+ return
+ }
+ if c.Request.Method == "MOVE" && (!user.CanWebdavManage() || (!user.CanMove() && !user.CanRename())) {
+ c.Status(http.StatusForbidden)
+ c.Abort()
+ return
+ }
+ if c.Request.Method == "COPY" && (!user.CanWebdavManage() || !user.CanCopy()) {
+ c.Status(http.StatusForbidden)
+ c.Abort()
+ return
+ }
+ if c.Request.Method == "DELETE" && (!user.CanWebdavManage() || !user.CanRemove()) {
+ c.Status(http.StatusForbidden)
+ c.Abort()
+ return
+ }
+ if c.Request.Method == "PROPPATCH" && !user.CanWebdavManage() {
c.Status(http.StatusForbidden)
c.Abort()
return
diff --git a/server/webdav/file.go b/server/webdav/file.go
index 01e96f7d..ac8f5c1c 100644
--- a/server/webdav/file.go
+++ b/server/webdav/file.go
@@ -33,6 +33,13 @@ func moveFiles(ctx context.Context, src, dst string, overwrite bool) (status int
dstDir := path.Dir(dst)
srcName := path.Base(src)
dstName := path.Base(dst)
+ user := ctx.Value("user").(*model.User)
+ if srcDir != dstDir && !user.CanMove() {
+ return http.StatusForbidden, nil
+ }
+ if srcName != dstName && !user.CanRename() {
+ return http.StatusForbidden, nil
+ }
if srcDir == dstDir {
err = fs.Rename(ctx, src, dstName)
} else {
diff --git a/server/webdav/prop.go b/server/webdav/prop.go
index b1474ea3..a81f31b0 100644
--- a/server/webdav/prop.go
+++ b/server/webdav/prop.go
@@ -18,6 +18,7 @@ import (
"time"
"github.com/alist-org/alist/v3/internal/model"
+ "github.com/alist-org/alist/v3/server/common"
)
// Proppatch describes a property update instruction as defined in RFC 4918.
@@ -101,7 +102,7 @@ type DeadPropsHolder interface {
Patch([]Proppatch) ([]Propstat, error)
}
-// liveProps contains all supported, protected DAV: properties.
+// liveProps contains all supported properties.
var liveProps = map[xml.Name]struct {
// findFn implements the propfind function of this property. If nil,
// it indicates a hidden property.
@@ -160,6 +161,10 @@ var liveProps = map[xml.Name]struct {
findFn: findSupportedLock,
dir: true,
},
+ {Space: "http://owncloud.org/ns", Local: "checksums"}: {
+ findFn: findChecksums,
+ dir: false,
+ },
}
// TODO(nigeltao) merge props and allprop?
@@ -473,7 +478,7 @@ func findETag(ctx context.Context, ls LockSystem, name string, fi model.Obj) (st
// The Apache http 2.4 web server by default concatenates the
// modification time and size of a file. We replicate the heuristic
// with nanosecond granularity.
- return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.GetSize()), nil
+ return common.GetEtag(fi), nil
}
func findSupportedLock(ctx context.Context, ls LockSystem, name string, fi model.Obj) (string, error) {
@@ -483,3 +488,11 @@ func findSupportedLock(ctx context.Context, ls LockSystem, name string, fi model
`