From 6106a2d4cc275df59abb992b8ad8ad32d45f69ad Mon Sep 17 00:00:00 2001 From: Shelton Zhu <498220739@qq.com> Date: Wed, 18 Sep 2024 23:30:28 +0800 Subject: [PATCH 001/187] fix: dynamic update app version (close #7198 in #7220) --- drivers/115/util.go | 23 ++++++++++++++++++++--- go.mod | 2 +- go.sum | 14 ++------------ 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/drivers/115/util.go b/drivers/115/util.go index 992502c4..ddddf6e9 100644 --- a/drivers/115/util.go +++ b/drivers/115/util.go @@ -74,9 +74,23 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) { } const ( - appVer = "2.0.3.6" + appVer = "27.0.3.7" ) +func (c *Pan115) getAppVer() string { + // todo add some cache? + vers, err := c.client.GetAppVersion() + if err != nil { + return appVer + } + for _, ver := range vers { + if ver.AppName == "win" { + return ver.Version + } + } + return appVer +} + func (c *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) { key := crypto.GenerateKey() result := driver115.DownloadResp{} @@ -151,7 +165,7 @@ func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID stri userID := strconv.FormatInt(d.client.UserID, 10) form := url.Values{} form.Set("appid", "0") - form.Set("appversion", appVer) + form.Set("appversion", d.getAppVer()) form.Set("userid", userID) form.Set("filename", fileName) form.Set("filesize", fileSizeStr) @@ -161,7 +175,7 @@ func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID stri signKey, signVal := "", "" for retry := true; retry; { - t := driver115.Now() + t := driver115.NowMilli() if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil { return nil, err @@ -225,6 +239,9 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri length := end - start + 1 reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length}) + if err != nil { + return "", err + } hashStr, err := utils.HashReader(utils.SHA1, 
reader) if err != nil { return "", err diff --git a/go.mod b/go.mod index 8ec1c302..94e10ca1 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/alist-org/alist/v3 go 1.22.4 require ( - github.com/SheltonZhu/115driver v1.0.27 + github.com/SheltonZhu/115driver v1.0.29 github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4 github.com/alist-org/gofakes3 v0.0.7 diff --git a/go.sum b/go.sum index 6ba075f3..346a2d45 100644 --- a/go.sum +++ b/go.sum @@ -7,8 +7,8 @@ github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc= github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM= github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= -github.com/SheltonZhu/115driver v1.0.27 h1:Ya1HYHYXFmi7JnqQ/+Vy6xZvq3leto+E+PxTm6UChj8= -github.com/SheltonZhu/115driver v1.0.27/go.mod h1:e3fPOBANbH/FsTya8FquJwOR3ErhCQgEab3q6CVY2k4= +github.com/SheltonZhu/115driver v1.0.29 h1:yFBqFDYJyADo3eG2RjJgSovnFd1OrpGHmsHBi6j0+r4= +github.com/SheltonZhu/115driver v1.0.29/go.mod h1:e3fPOBANbH/FsTya8FquJwOR3ErhCQgEab3q6CVY2k4= github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A= github.com/Unknwon/goconfig v1.0.0/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw= github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 h1:h6q5E9aMBhhdqouW81LozVPI1I+Pu6IxL2EKpfm5OjY= @@ -96,8 +96,6 @@ github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charmbracelet/bubbles v0.19.0 
h1:gKZkKXPP6GlDk6EcfujDK19PCQqRjaJZQ7QRERx1UF0= -github.com/charmbracelet/bubbles v0.19.0/go.mod h1:WILteEqZ+krG5c3ntGEMeG99nCupcuIk7V0/zOP0tOA= github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE= github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU= github.com/charmbracelet/bubbletea v1.1.0 h1:FjAl9eAL3HBCHenhz/ZPjkKdScmaS5SK69JAK2YJK9c= @@ -548,8 +546,6 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= @@ -626,8 +622,6 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= @@ -640,8 +634,6 @@ golang.org/x/term 
v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -657,8 +649,6 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From bdf4b52885299d8480dca554885811964fb0a94d Mon Sep 17 00:00:00 2001 From: Shelton Zhu <498220739@qq.com> Date: Sat, 28 Sep 2024 23:15:58 +0800 Subject: [PATCH 002/187] feat(offline_download): add transmission (close #4102 in #7232) --- go.mod | 3 + go.sum | 6 + internal/conf/const.go | 12 +- internal/offline_download/all.go | 1 + internal/offline_download/tool/download.go | 13 ++ .../offline_download/transmission/client.go | 176 ++++++++++++++++++ server/handles/offline_download.go | 35 ++++ server/router.go | 10 +- 8 files 
changed, 248 insertions(+), 8 deletions(-) create mode 100644 internal/offline_download/transmission/client.go diff --git a/go.mod b/go.mod index 94e10ca1..9b9d859d 100644 --- a/go.mod +++ b/go.mod @@ -33,6 +33,7 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 + github.com/hekmon/transmissionrpc/v3 v3.0.0 github.com/hirochachacha/go-smb2 v1.1.0 github.com/ipfs/go-ipfs-api v0.7.0 github.com/jlaffaye/ftp v0.2.0 @@ -82,6 +83,8 @@ require ( github.com/cloudwego/base64x v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hekmon/cunits/v2 v2.1.0 // indirect github.com/ipfs/boxo v0.12.0 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect ) diff --git a/go.sum b/go.sum index 346a2d45..f4699bc2 100644 --- a/go.sum +++ b/go.sum @@ -240,11 +240,17 @@ github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= 
+github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI0= +github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M= +github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ= +github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg= github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI= github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= diff --git a/internal/conf/const.go b/internal/conf/const.go index 2d53702e..13787b5e 100644 --- a/internal/conf/const.go +++ b/internal/conf/const.go @@ -54,11 +54,15 @@ const ( Aria2Uri = "aria2_uri" Aria2Secret = "aria2_secret" + // transmission + TransmissionUri = "transmission_uri" + TransmissionSeedtime = "transmission_seedtime" + // single Token = "token" IndexProgress = "index_progress" - //SSO + // SSO SSOClientId = "sso_client_id" SSOClientSecret = "sso_client_secret" SSOLoginEnabled = "sso_login_enabled" @@ -73,7 +77,7 @@ const ( SSODefaultPermission = "sso_default_permission" SSOCompatibilityMode = "sso_compatibility_mode" - //ldap + // ldap LdapLoginEnabled = "ldap_login_enabled" LdapServer = "ldap_server" LdapManagerDN = "ldap_manager_dn" @@ -84,7 +88,7 @@ const ( LdapDefaultDir = "ldap_default_dir" LdapLoginTips = "ldap_login_tips" - //s3 + // s3 S3Buckets = "s3_buckets" S3AccessKeyId = "s3_access_key_id" S3SecretAccessKey = "s3_secret_access_key" @@ -97,7 +101,7 @@ const ( const ( UNKNOWN = iota FOLDER - //OFFICE + // OFFICE VIDEO AUDIO TEXT diff --git a/internal/offline_download/all.go b/internal/offline_download/all.go index ee80b5a0..6682155d 100644 --- a/internal/offline_download/all.go +++ b/internal/offline_download/all.go @@ -6,4 +6,5 @@ import ( _ "github.com/alist-org/alist/v3/internal/offline_download/http" _ 
"github.com/alist-org/alist/v3/internal/offline_download/pikpak" _ "github.com/alist-org/alist/v3/internal/offline_download/qbit" + _ "github.com/alist-org/alist/v3/internal/offline_download/transmission" ) diff --git a/internal/offline_download/tool/download.go b/internal/offline_download/tool/download.go index 4cc86a26..ef9ceabf 100644 --- a/internal/offline_download/tool/download.go +++ b/internal/offline_download/tool/download.go @@ -101,6 +101,19 @@ outer: } } } + + if t.tool.Name() == "transmission" { + // hack for transmission + seedTime := setting.GetInt(conf.TransmissionSeedtime, 0) + if seedTime >= 0 { + t.Status = "offline download completed, waiting for seeding" + <-time.After(time.Minute * time.Duration(seedTime)) + err := t.tool.Remove(t) + if err != nil { + log.Errorln(err.Error()) + } + } + } return nil } diff --git a/internal/offline_download/transmission/client.go b/internal/offline_download/transmission/client.go new file mode 100644 index 00000000..a6075414 --- /dev/null +++ b/internal/offline_download/transmission/client.go @@ -0,0 +1,176 @@ +package transmission + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/offline_download/tool" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/hekmon/transmissionrpc/v3" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +type Transmission struct { + client *transmissionrpc.Client +} + +func (t *Transmission) Run(task *tool.DownloadTask) error { + return errs.NotSupport +} + +func (t *Transmission) Name() string { + return "transmission" +} + +func (t *Transmission) Items() []model.SettingItem { + // transmission settings + return []model.SettingItem{ + {Key: conf.TransmissionUri, Value: "http://localhost:9091/transmission/rpc", Type: 
conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE}, + {Key: conf.TransmissionSeedtime, Value: "0", Type: conf.TypeNumber, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE}, + } +} + +func (t *Transmission) Init() (string, error) { + t.client = nil + uri := setting.GetStr(conf.TransmissionUri) + endpoint, err := url.Parse(uri) + if err != nil { + return "", errors.Wrap(err, "failed to init transmission client") + } + c, err := transmissionrpc.New(endpoint, nil) + if err != nil { + return "", errors.Wrap(err, "failed to init transmission client") + } + + ok, serverVersion, serverMinimumVersion, err := c.RPCVersion(context.Background()) + if err != nil { + return "", errors.Wrapf(err, "failed get transmission version") + } + + if !ok { + return "", fmt.Errorf("remote transmission RPC version (v%d) is incompatible with the transmission library (v%d): remote needs at least v%d", + serverVersion, transmissionrpc.RPCVersion, serverMinimumVersion) + } + + t.client = c + log.Infof("remote transmission RPC version (v%d) is compatible with our transmissionrpc library (v%d)\n", + serverVersion, transmissionrpc.RPCVersion) + log.Infof("using transmission version: %d", serverVersion) + return fmt.Sprintf("transmission version: %d", serverVersion), nil +} + +func (t *Transmission) IsReady() bool { + return t.client != nil +} + +func (t *Transmission) AddURL(args *tool.AddUrlArgs) (string, error) { + endpoint, err := url.Parse(args.Url) + if err != nil { + return "", errors.Wrap(err, "failed to parse transmission uri") + } + + rpcPayload := transmissionrpc.TorrentAddPayload{ + DownloadDir: &args.TempDir, + } + // http url for .torrent file + if endpoint.Scheme == "http" || endpoint.Scheme == "https" { + resp, err := http.Get(args.Url) + if err != nil { + return "", errors.Wrap(err, "failed to get .torrent file") + } + defer resp.Body.Close() + buffer := new(bytes.Buffer) + encoder := base64.NewEncoder(base64.StdEncoding, buffer) + // Stream file to the encoder 
+ if _, err = io.Copy(encoder, resp.Body); err != nil { + return "", errors.Wrap(err, "can't copy file content into the base64 encoder") + } + // Flush last bytes + if err = encoder.Close(); err != nil { + return "", errors.Wrap(err, "can't flush last bytes of the base64 encoder") + } + // Get the string form + b64 := buffer.String() + rpcPayload.MetaInfo = &b64 + } else { // magnet uri + rpcPayload.Filename = &args.Url + } + + torrent, err := t.client.TorrentAdd(context.TODO(), rpcPayload) + if err != nil { + return "", err + } + + if torrent.ID == nil { + return "", fmt.Errorf("failed get torrent ID") + } + gid := strconv.FormatInt(*torrent.ID, 10) + return gid, nil +} + +func (t *Transmission) Remove(task *tool.DownloadTask) error { + gid, err := strconv.ParseInt(task.GID, 10, 64) + if err != nil { + return err + } + err = t.client.TorrentRemove(context.TODO(), transmissionrpc.TorrentRemovePayload{ + IDs: []int64{gid}, + DeleteLocalData: false, + }) + return err +} + +func (t *Transmission) Status(task *tool.DownloadTask) (*tool.Status, error) { + gid, err := strconv.ParseInt(task.GID, 10, 64) + if err != nil { + return nil, err + } + infos, err := t.client.TorrentGetAllFor(context.TODO(), []int64{gid}) + if err != nil { + return nil, err + } + + if len(infos) < 1 { + return nil, fmt.Errorf("failed get status, wrong gid: %s", task.GID) + } + info := infos[0] + + s := &tool.Status{ + Completed: *info.IsFinished, + Err: err, + } + s.Progress = *info.PercentDone * 100 + + switch *info.Status { + case transmissionrpc.TorrentStatusCheckWait, + transmissionrpc.TorrentStatusDownloadWait, + transmissionrpc.TorrentStatusCheck, + transmissionrpc.TorrentStatusDownload, + transmissionrpc.TorrentStatusIsolated: + s.Status = "[transmission] " + info.Status.String() + case transmissionrpc.TorrentStatusSeedWait, + transmissionrpc.TorrentStatusSeed: + s.Completed = true + case transmissionrpc.TorrentStatusStopped: + s.Err = errors.Errorf("[transmission] failed to download %s, 
status: %s, error: %s", task.GID, info.Status.String(), *info.ErrorString) + default: + s.Err = errors.Errorf("[transmission] unknown status occurred downloading %s, err: %s", task.GID, *info.ErrorString) + } + return s, nil +} + +var _ tool.Tool = (*Transmission)(nil) + +func init() { + tool.Tools.Add(&Transmission{}) +} diff --git a/server/handles/offline_download.go b/server/handles/offline_download.go index 0b019e9e..1c5f9555 100644 --- a/server/handles/offline_download.go +++ b/server/handles/offline_download.go @@ -30,6 +30,10 @@ func SetAria2(c *gin.Context) { return } _tool, err := tool.Tools.Get("aria2") + if err != nil { + common.ErrorResp(c, err, 500) + return + } version, err := _tool.Init() if err != nil { common.ErrorResp(c, err, 500) @@ -74,6 +78,37 @@ func OfflineDownloadTools(c *gin.Context) { common.SuccessResp(c, tools) } +type SetTransmissionReq struct { + Uri string `json:"uri" form:"uri"` + Seedtime string `json:"seedtime" form:"seedtime"` +} + +func SetTransmission(c *gin.Context) { + var req SetTransmissionReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + items := []model.SettingItem{ + {Key: conf.TransmissionUri, Value: req.Uri, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE}, + {Key: conf.TransmissionSeedtime, Value: req.Seedtime, Type: conf.TypeNumber, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE}, + } + if err := op.SaveSettingItems(items); err != nil { + common.ErrorResp(c, err, 500) + return + } + _tool, err := tool.Tools.Get("transmission") + if err != nil { + common.ErrorResp(c, err, 500) + return + } + if _, err := _tool.Init(); err != nil { + common.ErrorResp(c, err, 500) + return + } + common.SuccessResp(c, "ok") +} + type AddOfflineDownloadReq struct { Urls []string `json:"urls"` Path string `json:"path"` diff --git a/server/router.go b/server/router.go index 5be593f7..07423f92 100644 --- a/server/router.go +++ b/server/router.go @@ -62,7 +62,7 @@ func 
Init(e *gin.Engine) { api.GET("/auth/get_sso_id", handles.SSOLoginCallback) api.GET("/auth/sso_get_token", handles.SSOLoginCallback) - //webauthn + // webauthn webauthn.GET("/webauthn_begin_registration", handles.BeginAuthnRegistration) webauthn.POST("/webauthn_finish_registration", handles.FinishAuthnRegistration) webauthn.GET("/webauthn_begin_login", handles.BeginAuthnLogin) @@ -125,6 +125,7 @@ func admin(g *gin.RouterGroup) { setting.POST("/reset_token", handles.ResetToken) setting.POST("/set_aria2", handles.SetAria2) setting.POST("/set_qbit", handles.SetQbittorrent) + setting.POST("/set_transmission", handles.SetTransmission) task := g.Group("/task") handles.SetupTaskRoute(task) @@ -159,14 +160,15 @@ func _fs(g *gin.RouterGroup) { g.PUT("/put", middlewares.FsUp, handles.FsStream) g.PUT("/form", middlewares.FsUp, handles.FsForm) g.POST("/link", middlewares.AuthAdmin, handles.Link) - //g.POST("/add_aria2", handles.AddOfflineDownload) - //g.POST("/add_qbit", handles.AddQbittorrent) + // g.POST("/add_aria2", handles.AddOfflineDownload) + // g.POST("/add_qbit", handles.AddQbittorrent) + // g.POST("/add_transmission", handles.SetTransmission) g.POST("/add_offline_download", handles.AddOfflineDownload) } func Cors(r *gin.Engine) { config := cors.DefaultConfig() - //config.AllowAllOrigins = true + // config.AllowAllOrigins = true config.AllowOrigins = conf.Conf.Cors.AllowOrigins config.AllowHeaders = conf.Conf.Cors.AllowHeaders config.AllowMethods = conf.Conf.Cors.AllowMethods From 5f19d73fcc57d85c6d40753f823867c534b45b36 Mon Sep 17 00:00:00 2001 From: URenko <18209292+URenko@users.noreply.github.com> Date: Fri, 4 Oct 2024 07:46:10 +0000 Subject: [PATCH 003/187] fix: Terabox ( close #6961 close #6983 in #7279) --- drivers/terabox/driver.go | 66 +++++++++++++++++++++++++++++++++------ drivers/terabox/types.go | 4 +++ drivers/terabox/util.go | 41 ++++++++++++++---------- 3 files changed, 84 insertions(+), 27 deletions(-) diff --git a/drivers/terabox/driver.go 
b/drivers/terabox/driver.go index c9662fce..11db351b 100644 --- a/drivers/terabox/driver.go +++ b/drivers/terabox/driver.go @@ -11,6 +11,7 @@ import ( stdpath "path" "strconv" "strings" + "time" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/pkg/utils" @@ -23,7 +24,9 @@ import ( type Terabox struct { model.Storage Addition - JsToken string + JsToken string + url_domain_prefix string + base_url string } func (d *Terabox) Config() driver.Config { @@ -36,6 +39,8 @@ func (d *Terabox) GetAddition() driver.Additional { func (d *Terabox) Init(ctx context.Context) error { var resp CheckLoginResp + d.base_url = "https://www.terabox.com" + d.url_domain_prefix = "jp" _, err := d.get("/api/check/login", nil, &resp) if err != nil { return err @@ -71,7 +76,16 @@ func (d *Terabox) Link(ctx context.Context, file model.Obj, args model.LinkArgs) } func (d *Terabox) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { - _, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "") + params := map[string]string{ + "a": "commit", + } + data := map[string]string{ + "path": stdpath.Join(parentDir.GetPath(), dirName), + "isdir": "1", + "block_list": "[]", + } + res, err := d.post_form("/api/create", params, data, nil) + log.Debugln(string(res)) return err } @@ -117,6 +131,20 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error { } func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { + resp, err := base.RestyClient.R(). + SetContext(ctx). 
+ Get("https://" + d.url_domain_prefix + "-data.terabox.com/rest/2.0/pcs/file?method=locateupload") + if err != nil { + return err + } + var locateupload_resp LocateUploadResp + err = utils.Json.Unmarshal(resp.Body(), &locateupload_resp) + if err != nil { + log.Debugln(resp) + return err + } + log.Debugln(locateupload_resp) + tempFile, err := stream.CacheFullInTempFile() if err != nil { return err @@ -157,23 +185,28 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName()) path := encodeURIComponent(rawPath) block_list_str := fmt.Sprintf("[%s]", strings.Join(block_list, ",")) - data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&block_list=%s", - path, stream.GetSize(), - block_list_str) - params := map[string]string{} + data := map[string]string{ + "path": rawPath, + "autoinit": "1", + "target_path": dstDir.GetPath(), + "block_list": block_list_str, + "local_mtime": strconv.FormatInt(time.Now().Unix(), 10), + } var precreateResp PrecreateResp - _, err = d.post("/api/precreate", params, data, &precreateResp) + log.Debugln(data) + res, err := d.post_form("/api/precreate", nil, data, &precreateResp) if err != nil { return err } log.Debugf("%+v", precreateResp) if precreateResp.Errno != 0 { + log.Debugln(string(res)) return fmt.Errorf("[terabox] failed to precreate file, errno: %d", precreateResp.Errno) } if precreateResp.ReturnType == 2 { return nil } - params = map[string]string{ + params := map[string]string{ "method": "upload", "path": path, "uploadid": precreateResp.Uploadid, @@ -200,7 +233,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt if err != nil { return err } - u := "https://c-jp.terabox.com/rest/2.0/pcs/superfile2" + u := "https://" + locateupload_resp.Host + "/rest/2.0/pcs/superfile2" params["partseq"] = strconv.Itoa(partseq) res, err := base.RestyClient.R(). SetContext(ctx). 
@@ -216,7 +249,20 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt up(float64(i) * 100 / float64(len(precreateResp.BlockList))) } } - _, err = d.create(rawPath, stream.GetSize(), 0, precreateResp.Uploadid, block_list_str) + params = map[string]string{ + "isdir": "0", + "rtype": "1", + } + data = map[string]string{ + "path": rawPath, + "size": strconv.FormatInt(stream.GetSize(), 10), + "uploadid": precreateResp.Uploadid, + "target_path": dstDir.GetPath(), + "block_list": block_list_str, + "local_mtime": strconv.FormatInt(time.Now().Unix(), 10), + } + res, err = d.post_form("/api/create", params, data, nil) + log.Debugln(string(res)) return err } diff --git a/drivers/terabox/types.go b/drivers/terabox/types.go index 890d5305..8bdbc6fc 100644 --- a/drivers/terabox/types.go +++ b/drivers/terabox/types.go @@ -95,3 +95,7 @@ type PrecreateResp struct { type CheckLoginResp struct { Errno int `json:"errno"` } + +type LocateUploadResp struct { + Host string `json:"host"` +} diff --git a/drivers/terabox/util.go b/drivers/terabox/util.go index 0a4e7879..e0f3d74e 100644 --- a/drivers/terabox/util.go +++ b/drivers/terabox/util.go @@ -14,6 +14,7 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" + log "github.com/sirupsen/logrus" ) func getStrBetween(raw, start, end string) string { @@ -28,11 +29,11 @@ func getStrBetween(raw, start, end string) string { } func (d *Terabox) resetJsToken() error { - u := "https://www.terabox.com/main" + u := d.base_url res, err := base.RestyClient.R().SetHeaders(map[string]string{ "Cookie": d.Cookie, "Accept": "application/json, text/plain, */*", - "Referer": "https://www.terabox.com/", + "Referer": d.base_url, "User-Agent": base.UserAgent, "X-Requested-With": "XMLHttpRequest", }).Get(u) @@ -48,12 +49,12 @@ func (d *Terabox) resetJsToken() error { return nil } -func (d *Terabox) request(furl string, method string, callback 
base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) { +func (d *Terabox) request(rurl string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) { req := base.RestyClient.R() req.SetHeaders(map[string]string{ "Cookie": d.Cookie, "Accept": "application/json, text/plain, */*", - "Referer": "https://www.terabox.com/", + "Referer": d.base_url, "User-Agent": base.UserAgent, "X-Requested-With": "XMLHttpRequest", }) @@ -70,7 +71,7 @@ func (d *Terabox) request(furl string, method string, callback base.ReqCallback, if resp != nil { req.SetResult(resp) } - res, err := req.Execute(method, furl) + res, err := req.Execute(method, d.base_url+rurl) if err != nil { return nil, err } @@ -82,14 +83,20 @@ func (d *Terabox) request(furl string, method string, callback base.ReqCallback, return nil, err } if !utils.IsBool(noRetry...) { - return d.request(furl, method, callback, resp, true) + return d.request(rurl, method, callback, resp, true) } + } else if errno == -6 { + log.Debugln(res.Header()) + d.url_domain_prefix = res.Header()["Url-Domain-Prefix"][0] + d.base_url = "https://" + d.url_domain_prefix + ".terabox.com" + log.Debugln("Redirect base_url to", d.base_url) + return d.request(rurl, method, callback, resp, noRetry...) 
} return res.Body(), nil } func (d *Terabox) get(pathname string, params map[string]string, resp interface{}) ([]byte, error) { - return d.request("https://www.terabox.com"+pathname, http.MethodGet, func(req *resty.Request) { + return d.request(pathname, http.MethodGet, func(req *resty.Request) { if params != nil { req.SetQueryParams(params) } @@ -97,7 +104,7 @@ func (d *Terabox) get(pathname string, params map[string]string, resp interface{ } func (d *Terabox) post(pathname string, params map[string]string, data interface{}, resp interface{}) ([]byte, error) { - return d.request("https://www.terabox.com"+pathname, http.MethodPost, func(req *resty.Request) { + return d.request(pathname, http.MethodPost, func(req *resty.Request) { if params != nil { req.SetQueryParams(params) } @@ -105,6 +112,15 @@ func (d *Terabox) post(pathname string, params map[string]string, data interface }, resp) } +func (d *Terabox) post_form(pathname string, params map[string]string, data map[string]string, resp interface{}) ([]byte, error) { + return d.request(pathname, http.MethodPost, func(req *resty.Request) { + if params != nil { + req.SetQueryParams(params) + } + req.SetFormData(data) + }, resp) +} + func (d *Terabox) getFiles(dir string) ([]File, error) { page := 1 num := 100 @@ -237,15 +253,6 @@ func (d *Terabox) manage(opera string, filelist interface{}) ([]byte, error) { return d.post("/api/filemanager", params, data, nil) } -func (d *Terabox) create(path string, size int64, isdir int, uploadid, block_list string) ([]byte, error) { - params := map[string]string{} - data := fmt.Sprintf("path=%s&size=%d&isdir=%d", encodeURIComponent(path), size, isdir) - if uploadid != "" { - data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list) - } - return d.post("/api/create", params, data, nil) -} - func encodeURIComponent(str string) string { r := url.QueryEscape(str) r = strings.ReplaceAll(r, "+", "%20") From c3e43ff60588f52f7b8e41c41c99980093b289fb Mon Sep 17 00:00:00 2001 
From: Andy Hsu Date: Sat, 12 Oct 2024 00:48:54 +0800 Subject: [PATCH 004/187] fix(115): use latest appVer for upload (close #7315) --- drivers/115/appver.go | 43 +++++++++++++++++++++++++++++++++++++++++++ drivers/115/driver.go | 7 +++++-- drivers/115/util.go | 41 ++++++++++++++++++----------------------- 3 files changed, 66 insertions(+), 25 deletions(-) create mode 100644 drivers/115/appver.go diff --git a/drivers/115/appver.go b/drivers/115/appver.go new file mode 100644 index 00000000..78e11a54 --- /dev/null +++ b/drivers/115/appver.go @@ -0,0 +1,43 @@ +package _115 + +import ( + driver115 "github.com/SheltonZhu/115driver/pkg/driver" + "github.com/alist-org/alist/v3/drivers/base" + log "github.com/sirupsen/logrus" +) + +var ( + md5Salt = "Qclm8MGWUv59TnrR0XPg" + appVer = "27.0.5.7" +) + +func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) { + result := driver115.VersionResp{} + resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion) + + err = driver115.CheckErr(err, &result, resp) + if err != nil { + return nil, err + } + + return result.Data.GetAppVersions(), nil +} + +func (d *Pan115) getAppVer() string { + // todo add some cache? 
+ vers, err := d.getAppVersion() + if err != nil { + log.Warnf("[115] get app version failed: %v", err) + return appVer + } + for _, ver := range vers { + if ver.AppName == "win" { + return ver.Version + } + } + return appVer +} + +func (d *Pan115) initAppVer() { + appVer = d.getAppVer() +} diff --git a/drivers/115/driver.go b/drivers/115/driver.go index 2a1c8dee..f6fb6b05 100644 --- a/drivers/115/driver.go +++ b/drivers/115/driver.go @@ -3,6 +3,7 @@ package _115 import ( "context" "strings" + "sync" driver115 "github.com/SheltonZhu/115driver/pkg/driver" "github.com/alist-org/alist/v3/internal/driver" @@ -16,8 +17,9 @@ import ( type Pan115 struct { model.Storage Addition - client *driver115.Pan115Client - limiter *rate.Limiter + client *driver115.Pan115Client + limiter *rate.Limiter + appVerOnce sync.Once } func (d *Pan115) Config() driver.Config { @@ -29,6 +31,7 @@ func (d *Pan115) GetAddition() driver.Additional { } func (d *Pan115) Init(ctx context.Context) error { + d.appVerOnce.Do(d.initAppVer) if d.LimitRate > 0 { d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1) } diff --git a/drivers/115/util.go b/drivers/115/util.go index ddddf6e9..7d5889af 100644 --- a/drivers/115/util.go +++ b/drivers/115/util.go @@ -2,7 +2,9 @@ package _115 import ( "bytes" + "crypto/md5" "crypto/tls" + "encoding/hex" "encoding/json" "fmt" "io" @@ -26,12 +28,12 @@ import ( "github.com/pkg/errors" ) -var UserAgent = driver115.UA115Browser +//var UserAgent = driver115.UA115Browser func (d *Pan115) login() error { var err error opts := []driver115.Option{ - driver115.UA(UserAgent), + driver115.UA(d.getUA()), func(c *driver115.Pan115Client) { c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify}) }, @@ -73,25 +75,11 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) { return res, nil } -const ( - appVer = "27.0.3.7" -) - -func (c *Pan115) getAppVer() string { - // todo add some cache? 
- vers, err := c.client.GetAppVersion() - if err != nil { - return appVer - } - for _, ver := range vers { - if ver.AppName == "win" { - return ver.Version - } - } - return appVer +func (d *Pan115) getUA() string { + return fmt.Sprintf("Mozilla/5.0 115Browser/%s", appVer) } -func (c *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) { +func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) { key := crypto.GenerateKey() result := driver115.DownloadResp{} params, err := utils.Json.Marshal(map[string]string{"pickcode": pickCode}) @@ -105,10 +93,10 @@ func (c *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e reqUrl := fmt.Sprintf("%s?t=%s", driver115.ApiDownloadGetUrl, driver115.Now().String()) req, _ := http.NewRequest(http.MethodPost, reqUrl, bodyReader) req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - req.Header.Set("Cookie", c.Cookie) + req.Header.Set("Cookie", d.Cookie) req.Header.Set("User-Agent", ua) - resp, err := c.client.Client.GetClient().Do(req) + resp, err := d.client.Client.GetClient().Do(req) if err != nil { return nil, err } @@ -146,6 +134,13 @@ func (c *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e return nil, driver115.ErrUnexpected } +func (c *Pan115) GenerateToken(fileID, preID, timeStamp, fileSize, signKey, signVal string) string { + userID := strconv.FormatInt(c.client.UserID, 10) + userIDMd5 := md5.Sum([]byte(userID)) + tokenMd5 := md5.Sum([]byte(md5Salt + fileID + fileSize + signKey + signVal + userID + timeStamp + hex.EncodeToString(userIDMd5[:]) + appVer)) + return hex.EncodeToString(tokenMd5[:]) +} + func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) { var ( ecdhCipher *cipher.EcdhCipher @@ -165,7 +160,7 @@ func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID stri userID := 
strconv.FormatInt(d.client.UserID, 10) form := url.Values{} form.Set("appid", "0") - form.Set("appversion", d.getAppVer()) + form.Set("appversion", appVer) form.Set("userid", userID) form.Set("filename", fileName) form.Set("filesize", fileSizeStr) @@ -186,7 +181,7 @@ func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID stri } form.Set("t", t.String()) - form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal)) + form.Set("token", d.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal)) if signKey != "" && signVal != "" { form.Set("sign_key", signKey) form.Set("sign_val", signVal) From e8538bd215a3c25a0b3c234a0aa5a470d972f436 Mon Sep 17 00:00:00 2001 From: YangXu <47767754+Three-taile-dragon@users.noreply.github.com> Date: Mon, 14 Oct 2024 22:44:20 +0800 Subject: [PATCH 005/187] feat: add `febbox` driver (#7304 close #7293) --- drivers/all.go | 1 + drivers/febbox/driver.go | 132 +++++++++++++++++++++++ drivers/febbox/meta.go | 36 +++++++ drivers/febbox/oauth2.go | 88 +++++++++++++++ drivers/febbox/types.go | 123 +++++++++++++++++++++ drivers/febbox/util.go | 224 +++++++++++++++++++++++++++++++++++++++ 6 files changed, 604 insertions(+) create mode 100644 drivers/febbox/driver.go create mode 100644 drivers/febbox/meta.go create mode 100644 drivers/febbox/oauth2.go create mode 100644 drivers/febbox/types.go create mode 100644 drivers/febbox/util.go diff --git a/drivers/all.go b/drivers/all.go index 40062a1a..4c4ef5c1 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -22,6 +22,7 @@ import ( _ "github.com/alist-org/alist/v3/drivers/cloudreve" _ "github.com/alist-org/alist/v3/drivers/crypt" _ "github.com/alist-org/alist/v3/drivers/dropbox" + _ "github.com/alist-org/alist/v3/drivers/febbox" _ "github.com/alist-org/alist/v3/drivers/ftp" _ "github.com/alist-org/alist/v3/drivers/google_drive" _ "github.com/alist-org/alist/v3/drivers/google_photo" diff --git a/drivers/febbox/driver.go 
b/drivers/febbox/driver.go new file mode 100644 index 00000000..55c3aa21 --- /dev/null +++ b/drivers/febbox/driver.go @@ -0,0 +1,132 @@ +package febbox + +import ( + "context" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/utils" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" + + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" +) + +type FebBox struct { + model.Storage + Addition + accessToken string + oauth2Token oauth2.TokenSource +} + +func (d *FebBox) Config() driver.Config { + return config +} + +func (d *FebBox) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *FebBox) Init(ctx context.Context) error { + // 初始化 oauth2Config + oauth2Config := &clientcredentials.Config{ + ClientID: d.ClientID, + ClientSecret: d.ClientSecret, + AuthStyle: oauth2.AuthStyleInParams, + TokenURL: "https://api.febbox.com/oauth/token", + } + + d.initializeOAuth2Token(ctx, oauth2Config, d.Addition.RefreshToken) + + token, err := d.oauth2Token.Token() + if err != nil { + return err + } + d.accessToken = token.AccessToken + d.Addition.RefreshToken = token.RefreshToken + op.MustSaveDriverStorage(d) + + return nil +} + +func (d *FebBox) Drop(ctx context.Context) error { + return nil +} + +func (d *FebBox) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + files, err := d.getFilesList(dir.GetID()) + if err != nil { + return nil, err + } + return utils.SliceConvert(files, func(src File) (model.Obj, error) { + return fileToObj(src), nil + }) +} + +func (d *FebBox) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + var ip string + if d.Addition.UserIP != "" { + ip = d.Addition.UserIP + } else { + ip = args.IP + } + + url, err := d.getDownloadLink(file.GetID(), ip) + if err != nil { + return nil, err + } + return &model.Link{ + URL: url, + }, nil +} + 
+func (d *FebBox) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + err := d.makeDir(parentDir.GetID(), dirName) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (d *FebBox) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + err := d.move(srcObj.GetID(), dstDir.GetID()) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (d *FebBox) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + err := d.rename(srcObj.GetID(), newName) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (d *FebBox) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + err := d.copy(srcObj.GetID(), dstDir.GetID()) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (d *FebBox) Remove(ctx context.Context, obj model.Obj) error { + err := d.remove(obj.GetID()) + if err != nil { + return err + } + + return nil +} + +func (d *FebBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + return nil, errs.NotImplement +} + +var _ driver.Driver = (*FebBox)(nil) diff --git a/drivers/febbox/meta.go b/drivers/febbox/meta.go new file mode 100644 index 00000000..1daeeea8 --- /dev/null +++ b/drivers/febbox/meta.go @@ -0,0 +1,36 @@ +package febbox + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + driver.RootID + ClientID string `json:"client_id" required:"true" default:""` + ClientSecret string `json:"client_secret" required:"true" default:""` + RefreshToken string + SortRule string `json:"sort_rule" required:"true" type:"select" options:"size_asc,size_desc,name_asc,name_desc,update_asc,update_desc,ext_asc,ext_desc" default:"name_asc"` + PageSize int64 `json:"page_size" required:"true" type:"number" default:"100" help:"list api per page size of FebBox driver"` + 
UserIP string `json:"user_ip" default:"" help:"user ip address for download link which can speed up the download"` +} + +var config = driver.Config{ + Name: "FebBox", + LocalSort: false, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: true, + NeedMs: false, + DefaultRoot: "0", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &FebBox{} + }) +} diff --git a/drivers/febbox/oauth2.go b/drivers/febbox/oauth2.go new file mode 100644 index 00000000..6345d1a7 --- /dev/null +++ b/drivers/febbox/oauth2.go @@ -0,0 +1,88 @@ +package febbox + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +type customTokenSource struct { + config *clientcredentials.Config + ctx context.Context + refreshToken string +} + +func (c *customTokenSource) Token() (*oauth2.Token, error) { + v := url.Values{} + if c.refreshToken != "" { + v.Set("grant_type", "refresh_token") + v.Set("refresh_token", c.refreshToken) + } else { + v.Set("grant_type", "client_credentials") + } + + v.Set("client_id", c.config.ClientID) + v.Set("client_secret", c.config.ClientSecret) + + req, err := http.NewRequest("POST", c.config.TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, err := http.DefaultClient.Do(req.WithContext(c.ctx)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, errors.New("oauth2: cannot fetch token") + } + + var tokenResp struct { + Code int `json:"code"` + Msg string `json:"msg"` + Data struct { + AccessToken string `json:"access_token"` + ExpiresIn int64 `json:"expires_in"` + TokenType string `json:"token_type"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` + } 
`json:"data"` + } + + if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil { + return nil, err + } + + if tokenResp.Code != 1 { + return nil, errors.New("oauth2: server response error") + } + + c.refreshToken = tokenResp.Data.RefreshToken + + token := &oauth2.Token{ + AccessToken: tokenResp.Data.AccessToken, + TokenType: tokenResp.Data.TokenType, + RefreshToken: tokenResp.Data.RefreshToken, + Expiry: time.Now().Add(time.Duration(tokenResp.Data.ExpiresIn) * time.Second), + } + + return token, nil +} + +func (d *FebBox) initializeOAuth2Token(ctx context.Context, oauth2Config *clientcredentials.Config, refreshToken string) { + d.oauth2Token = oauth2.ReuseTokenSource(nil, &customTokenSource{ + config: oauth2Config, + ctx: ctx, + refreshToken: refreshToken, + }) +} diff --git a/drivers/febbox/types.go b/drivers/febbox/types.go new file mode 100644 index 00000000..2ac6d6b7 --- /dev/null +++ b/drivers/febbox/types.go @@ -0,0 +1,123 @@ +package febbox + +import ( + "fmt" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash" + "strconv" + "time" +) + +type ErrResp struct { + ErrorCode int64 `json:"code"` + ErrorMsg string `json:"msg"` + ServerRunTime float64 `json:"server_runtime"` + ServerName string `json:"server_name"` +} + +func (e *ErrResp) IsError() bool { + return e.ErrorCode != 0 || e.ErrorMsg != "" || e.ServerRunTime != 0 || e.ServerName != "" +} + +func (e *ErrResp) Error() string { + return fmt.Sprintf("ErrorCode: %d ,Error: %s ,ServerRunTime: %f ,ServerName: %s", e.ErrorCode, e.ErrorMsg, e.ServerRunTime, e.ServerName) +} + +type FileListResp struct { + Code int `json:"code"` + Msg string `json:"msg"` + Data struct { + FileList []File `json:"file_list"` + ShowType string `json:"show_type"` + } `json:"data"` +} + +type Rules struct { + AllowCopy int64 `json:"allow_copy"` + AllowDelete int64 `json:"allow_delete"` + AllowDownload int64 
`json:"allow_download"` + AllowComment int64 `json:"allow_comment"` + HideLocation int64 `json:"hide_location"` +} + +type File struct { + Fid int64 `json:"fid"` + UID int64 `json:"uid"` + FileSize int64 `json:"file_size"` + Path string `json:"path"` + FileName string `json:"file_name"` + Ext string `json:"ext"` + AddTime int64 `json:"add_time"` + FileCreateTime int64 `json:"file_create_time"` + FileUpdateTime int64 `json:"file_update_time"` + ParentID int64 `json:"parent_id"` + UpdateTime int64 `json:"update_time"` + LastOpenTime int64 `json:"last_open_time"` + IsDir int64 `json:"is_dir"` + Epub int64 `json:"epub"` + IsMusicList int64 `json:"is_music_list"` + OssFid int64 `json:"oss_fid"` + Faststart int64 `json:"faststart"` + HasVideoQuality int64 `json:"has_video_quality"` + TotalDownload int64 `json:"total_download"` + Status int64 `json:"status"` + Remark string `json:"remark"` + OldHash string `json:"old_hash"` + Hash string `json:"hash"` + HashType string `json:"hash_type"` + FromUID int64 `json:"from_uid"` + FidOrg int64 `json:"fid_org"` + ShareID int64 `json:"share_id"` + InvitePermission int64 `json:"invite_permission"` + ThumbSmall string `json:"thumb_small"` + ThumbSmallWidth int64 `json:"thumb_small_width"` + ThumbSmallHeight int64 `json:"thumb_small_height"` + Thumb string `json:"thumb"` + ThumbWidth int64 `json:"thumb_width"` + ThumbHeight int64 `json:"thumb_height"` + ThumbBig string `json:"thumb_big"` + ThumbBigWidth int64 `json:"thumb_big_width"` + ThumbBigHeight int64 `json:"thumb_big_height"` + IsCustomThumb int64 `json:"is_custom_thumb"` + Photos int64 `json:"photos"` + IsAlbum int64 `json:"is_album"` + ReadOnly int64 `json:"read_only"` + Rules Rules `json:"rules"` + IsShared int64 `json:"is_shared"` +} + +func fileToObj(f File) *model.ObjThumb { + return &model.ObjThumb{ + Object: model.Object{ + ID: strconv.FormatInt(f.Fid, 10), + Name: f.FileName, + Size: f.FileSize, + Ctime: time.Unix(f.FileCreateTime, 0), + Modified: 
time.Unix(f.FileUpdateTime, 0), + IsFolder: f.IsDir == 1, + HashInfo: utils.NewHashInfo(hash_extend.GCID, f.Hash), + }, + Thumbnail: model.Thumbnail{ + Thumbnail: f.Thumb, + }, + } +} + +type FileDownloadResp struct { + Code int `json:"code"` + Msg string `json:"msg"` + Data []struct { + Error int `json:"error"` + DownloadURL string `json:"download_url"` + Hash string `json:"hash"` + HashType string `json:"hash_type"` + Fid int `json:"fid"` + FileName string `json:"file_name"` + ParentID int `json:"parent_id"` + FileSize int `json:"file_size"` + Ext string `json:"ext"` + Thumb string `json:"thumb"` + VipLink int `json:"vip_link"` + } `json:"data"` +} diff --git a/drivers/febbox/util.go b/drivers/febbox/util.go new file mode 100644 index 00000000..ac072edb --- /dev/null +++ b/drivers/febbox/util.go @@ -0,0 +1,224 @@ +package febbox + +import ( + "encoding/json" + "errors" + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/op" + "github.com/go-resty/resty/v2" + "net/http" + "strconv" +) + +func (d *FebBox) refreshTokenByOAuth2() error { + token, err := d.oauth2Token.Token() + if err != nil { + return err + } + d.Status = "work" + d.accessToken = token.AccessToken + d.Addition.RefreshToken = token.RefreshToken + op.MustSaveDriverStorage(d) + return nil +} + +func (d *FebBox) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { + req := base.RestyClient.R() + // 使用oauth2 获取 access_token + token, err := d.oauth2Token.Token() + if err != nil { + return nil, err + } + req.SetAuthScheme(token.TokenType).SetAuthToken(token.AccessToken) + + if callback != nil { + callback(req) + } + if resp != nil { + req.SetResult(resp) + } + var e ErrResp + req.SetError(&e) + res, err := req.Execute(method, url) + if err != nil { + return nil, err + } + + switch e.ErrorCode { + case 0: + return res.Body(), nil + case 1: + return res.Body(), nil + case -10001: + if e.ServerName != "" { + // access_token 过期 + 
if err = d.refreshTokenByOAuth2(); err != nil { + return nil, err + } + return d.request(url, method, callback, resp) + } else { + return nil, errors.New(e.Error()) + } + default: + return nil, errors.New(e.Error()) + } +} + +func (d *FebBox) getFilesList(id string) ([]File, error) { + if d.PageSize <= 0 { + d.PageSize = 100 + } + res, err := d.listWithLimit(id, d.PageSize) + if err != nil { + return nil, err + } + return *res, nil +} + +func (d *FebBox) listWithLimit(dirID string, pageLimit int64) (*[]File, error) { + var files []File + page := int64(1) + for { + result, err := d.getFiles(dirID, page, pageLimit) + if err != nil { + return nil, err + } + files = append(files, *result...) + if int64(len(*result)) < pageLimit { + break + } else { + page++ + } + } + return &files, nil +} + +func (d *FebBox) getFiles(dirID string, page, pageLimit int64) (*[]File, error) { + var fileList FileListResp + queryParams := map[string]string{ + "module": "file_list", + "parent_id": dirID, + "page": strconv.FormatInt(page, 10), + "pagelimit": strconv.FormatInt(pageLimit, 10), + "order": d.Addition.SortRule, + } + + res, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) { + req.SetMultipartFormData(queryParams) + }, &fileList) + if err != nil { + return nil, err + } + + if err = json.Unmarshal(res, &fileList); err != nil { + return nil, err + } + + return &fileList.Data.FileList, nil +} + +func (d *FebBox) getDownloadLink(id string, ip string) (string, error) { + var fileDownloadResp FileDownloadResp + queryParams := map[string]string{ + "module": "file_get_download_url", + "fids[]": id, + "ip": ip, + } + + res, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) { + req.SetMultipartFormData(queryParams) + }, &fileDownloadResp) + if err != nil { + return "", err + } + + if err = json.Unmarshal(res, &fileDownloadResp); err != nil { + return "", err + } + + return fileDownloadResp.Data[0].DownloadURL, 
nil +} + +func (d *FebBox) makeDir(id string, name string) error { + queryParams := map[string]string{ + "module": "create_dir", + "parent_id": id, + "name": name, + } + + _, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) { + req.SetMultipartFormData(queryParams) + }, nil) + if err != nil { + return err + } + + return nil +} + +func (d *FebBox) move(id string, id2 string) error { + queryParams := map[string]string{ + "module": "file_move", + "fids[]": id, + "to": id2, + } + + _, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) { + req.SetMultipartFormData(queryParams) + }, nil) + if err != nil { + return err + } + + return nil +} + +func (d *FebBox) rename(id string, name string) error { + queryParams := map[string]string{ + "module": "file_rename", + "fid": id, + "name": name, + } + + _, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) { + req.SetMultipartFormData(queryParams) + }, nil) + if err != nil { + return err + } + + return nil +} + +func (d *FebBox) copy(id string, id2 string) error { + queryParams := map[string]string{ + "module": "file_copy", + "fids[]": id, + "to": id2, + } + + _, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) { + req.SetMultipartFormData(queryParams) + }, nil) + if err != nil { + return err + } + + return nil +} + +func (d *FebBox) remove(id string) error { + queryParams := map[string]string{ + "module": "file_delete", + "fids[]": id, + } + + _, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) { + req.SetMultipartFormData(queryParams) + }, nil) + if err != nil { + return err + } + + return nil +} From 2830575490e72a7afc8fd8e6b790a163b92a13b5 Mon Sep 17 00:00:00 2001 From: hanbao233xD <39661586+hanbao233xD@users.noreply.github.com> Date: Tue, 15 Oct 2024 19:45:30 +0800 Subject: [PATCH 006/187] perf(123pan): change domain of 
login (#7325) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update driver.go * 1 * Update util.go * 123新登录接口 * Revert "Update util.go" This reverts commit a13a58f8a86c7c36d4fd7d91137229a7667f1fb5. * Update driver.go * Update util.go * Update util.go --- drivers/123/driver.go | 1 + drivers/123/util.go | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/123/driver.go b/drivers/123/driver.go index aeda7fcf..3620431d 100644 --- a/drivers/123/driver.go +++ b/drivers/123/driver.go @@ -82,6 +82,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) "type": f.Type, } resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) { + req.SetBody(data).SetHeaders(headers) }, nil) if err != nil { diff --git a/drivers/123/util.go b/drivers/123/util.go index 73c73b3b..6365b1c9 100644 --- a/drivers/123/util.go +++ b/drivers/123/util.go @@ -26,8 +26,9 @@ const ( Api = "https://www.123pan.com/api" AApi = "https://www.123pan.com/a/api" BApi = "https://www.123pan.com/b/api" + LoginApi = "https://login.123pan.com/api" MainApi = BApi - SignIn = MainApi + "/user/sign_in" + SignIn = LoginApi + "/user/sign_in" Logout = MainApi + "/user/logout" UserInfo = MainApi + "/user/info" FileList = MainApi + "/file/list/new" From 48ac23c8de98e1bf6b6acf51e795c75b451493a7 Mon Sep 17 00:00:00 2001 From: Jason-Fly <869914918@qq.com> Date: Sun, 20 Oct 2024 23:53:40 +0800 Subject: [PATCH 007/187] fix(ilanzou): fix infinite loop when getting file list (#7366 close #7357) --- drivers/ilanzou/driver.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/ilanzou/driver.go b/drivers/ilanzou/driver.go index ab5ebe7e..24fcc436 100644 --- a/drivers/ilanzou/driver.go +++ b/drivers/ilanzou/driver.go @@ -66,12 +66,13 @@ func (d *ILanZou) Drop(ctx context.Context) error { } func (d *ILanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) 
{ + offset := 1 var res []ListItem for { var resp ListResp _, err := d.proved("/record/file/list", http.MethodGet, func(req *resty.Request) { params := []string{ - "offset=1", + "offset=" + strconv.Itoa(offset), "limit=60", "folderId=" + dir.GetID(), "type=0", @@ -83,7 +84,9 @@ func (d *ILanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs) return nil, err } res = append(res, resp.List...) - if resp.TotalPage <= resp.Offset { + if resp.Offset < resp.TotalPage { + offset++ + } else { break } } From a2dc45a80bd15a3e7373a257bd2e81a02632d1b5 Mon Sep 17 00:00:00 2001 From: Jason-Fly <869914918@qq.com> Date: Sun, 20 Oct 2024 23:53:56 +0800 Subject: [PATCH 008/187] fix(ilanzou): fix upload failure for small files (#7368 close #7250) --- drivers/ilanzou/driver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ilanzou/driver.go b/drivers/ilanzou/driver.go index 24fcc436..90ef7c1a 100644 --- a/drivers/ilanzou/driver.go +++ b/drivers/ilanzou/driver.go @@ -289,7 +289,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt req.SetBody(base.Json{ "fileId": "", "fileName": stream.GetName(), - "fileSize": stream.GetSize() / 1024, + "fileSize": stream.GetSize()/1024 + 1, "folderId": dstDir.GetID(), "md5": etag, "type": 1, From a701432b8bf5b43a78382d75e9090ed66c03a570 Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Mon, 21 Oct 2024 00:05:56 +0800 Subject: [PATCH 009/187] ci: add freebsd to beta release --- .github/workflows/beta_release.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/beta_release.yml b/.github/workflows/beta_release.yml index 32073eb9..90c2836f 100644 --- a/.github/workflows/beta_release.yml +++ b/.github/workflows/beta_release.yml @@ -8,6 +8,9 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true +permissions: + contents: write + jobs: changelog: strategy: @@ -54,7 +57,7 @@ jobs: 
strategy: matrix: include: - - target: '!(*musl*|*windows-arm64*|*android*)' # xgo + - target: '!(*musl*|*windows-arm64*|*android*|*freebsd*)' # xgo hash: "md5" - target: 'linux-!(arm*)-musl*' #musl-not-arm hash: "md5-linux-musl" @@ -64,6 +67,9 @@ jobs: hash: "md5-windows-arm64" - target: 'android-*' #android hash: "md5-android" + - target: 'freebsd-*' #freebsd + hash: "md5-freebsd" + name: Beta Release runs-on: ubuntu-latest steps: From 216e3909f3946eb9c1b786c0d82c00f278f0ea25 Mon Sep 17 00:00:00 2001 From: Shelton Zhu <498220739@qq.com> Date: Fri, 1 Nov 2024 20:52:19 +0800 Subject: [PATCH 010/187] fix(115): enforce 20GB file size limit on uploadev (#7447 close #7413) - Introduce a file size restriction to handle uploads more securely. - Provide an informative error for uploads that exceed the new limit. --- drivers/115/driver.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/115/driver.go b/drivers/115/driver.go index f6fb6b05..4857c1ec 100644 --- a/drivers/115/driver.go +++ b/drivers/115/driver.go @@ -2,6 +2,7 @@ package _115 import ( "context" + "fmt" "strings" "sync" @@ -121,7 +122,10 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr if err := d.WaitLimit(ctx); err != nil { return err } - + if stream.GetSize() > utils.GB*20 { // TODO 由于官方分片上传接口失效,所以使用普通上传小于20GB的文件 + return fmt.Errorf("unsupported file size: 20GB limit exceeded") + } + // 分片上传 var ( fastInfo *driver115.UploadInitResp dirID = dstDir.GetID() @@ -177,11 +181,13 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr } // 闪传失败,上传 - if stream.GetSize() <= utils.KB { // 文件大小小于1KB,改用普通模式上传 + // if stream.GetSize() <= utils.KB{ // 文件大小小于1KB,改用普通模式上传 + if stream.GetSize() <= utils.GB*20 { // TODO 由于官方分片上传接口失效,所以使用普通上传小于20GB的文件 return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID) } + return driver115.ErrUnexpected // 分片上传 - return d.UploadByMultipart(&fastInfo.UploadOSSParams, 
stream.GetSize(), stream, dirID) + // return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID) } func (d *Pan115) OfflineList(ctx context.Context) ([]*driver115.OfflineTask, error) { From 4955d8cec8a839b95e66c620e3ca69c1348e65eb Mon Sep 17 00:00:00 2001 From: Mmx Date: Fri, 1 Nov 2024 20:53:53 +0800 Subject: [PATCH 011/187] ci(docker): support riscv64 and ppc64le (#7426) * ci(docker): bump cache key of musl library * build(docker): add new arches to build script * ci(docker): add new arches to buildx platforms --- .github/workflows/build_docker.yml | 6 +++--- .github/workflows/release_docker.yml | 6 +++--- build.sh | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build_docker.yml b/.github/workflows/build_docker.yml index 8f37688d..6384c374 100644 --- a/.github/workflows/build_docker.yml +++ b/.github/workflows/build_docker.yml @@ -53,7 +53,7 @@ jobs: uses: actions/cache@v4 with: path: build/musl-libs - key: docker-musl-libs + key: docker-musl-libs-v2 - name: Download Musl Library if: steps.cache-musl.outputs.cache-hit != 'true' @@ -84,7 +84,7 @@ jobs: push: ${{ github.event_name == 'push' }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x + platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64 - name: Build and push with ffmpeg id: docker_build_ffmpeg @@ -96,7 +96,7 @@ jobs: tags: ${{ steps.meta-ffmpeg.outputs.tags }} labels: ${{ steps.meta-ffmpeg.outputs.labels }} build-args: INSTALL_FFMPEG=true - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x + platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64 build_docker_with_aria2: needs: build_docker diff --git a/.github/workflows/release_docker.yml b/.github/workflows/release_docker.yml 
index 95a686b2..a2dd2dd7 100644 --- a/.github/workflows/release_docker.yml +++ b/.github/workflows/release_docker.yml @@ -22,7 +22,7 @@ jobs: uses: actions/cache@v4 with: path: build/musl-libs - key: docker-musl-libs + key: docker-musl-libs-v2 - name: Download Musl Library if: steps.cache-musl.outputs.cache-hit != 'true' @@ -58,7 +58,7 @@ jobs: push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x + platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64 - name: Docker meta with ffmpeg id: meta-ffmpeg @@ -79,7 +79,7 @@ jobs: tags: ${{ steps.meta-ffmpeg.outputs.tags }} labels: ${{ steps.meta-ffmpeg.outputs.labels }} build-args: INSTALL_FFMPEG=true - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x + platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64 release_docker_with_aria2: needs: release_docker diff --git a/build.sh b/build.sh index 18a30e63..6b28847c 100644 --- a/build.sh +++ b/build.sh @@ -93,7 +93,7 @@ BuildDocker() { PrepareBuildDockerMusl() { mkdir -p build/musl-libs BASE="https://musl.cc/" - FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross) + FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross) for i in "${FILES[@]}"; do url="${BASE}${i}.tgz" lib_tgz="build/${i}.tgz" @@ -112,8 +112,8 @@ BuildDockerMultiplatform() { docker_lflags="--extldflags '-static -fpic' $ldflags" export CGO_ENABLED=1 - OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x) - CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc 
s390x-linux-musl-gcc) + OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le) + CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc) for i in "${!OS_ARCHES[@]}"; do os_arch=${OS_ARCHES[$i]} cgo_cc=${CGO_ARGS[$i]} From 34a148c83de62258322228b43e63b59f0b2f1801 Mon Sep 17 00:00:00 2001 From: Mmx Date: Fri, 1 Nov 2024 20:58:53 +0800 Subject: [PATCH 012/187] feat(local): thumbnail token bucket smooth migration (#7425) * feat(local): allow to migrate static token buckets * improve(local): token bucket migration boundary handling --- drivers/local/driver.go | 2 +- drivers/local/token_bucket.go | 38 +++++++++++++++++++++++++++++++++-- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/drivers/local/driver.go b/drivers/local/driver.go index bf993e5d..86980943 100644 --- a/drivers/local/driver.go +++ b/drivers/local/driver.go @@ -76,7 +76,7 @@ func (d *Local) Init(ctx context.Context) error { if d.thumbConcurrency == 0 { d.thumbTokenBucket = NewNopTokenBucket() } else { - d.thumbTokenBucket = NewStaticTokenBucket(d.thumbConcurrency) + d.thumbTokenBucket = NewStaticTokenBucketWithMigration(d.thumbTokenBucket, d.thumbConcurrency) } return nil } diff --git a/drivers/local/token_bucket.go b/drivers/local/token_bucket.go index 38fbe73f..23c6ebd6 100644 --- a/drivers/local/token_bucket.go +++ b/drivers/local/token_bucket.go @@ -23,6 +23,38 @@ func NewStaticTokenBucket(size int) StaticTokenBucket { return StaticTokenBucket{bucket: bucket} } +func NewStaticTokenBucketWithMigration(oldBucket TokenBucket, size int) StaticTokenBucket { + if oldBucket != nil { + oldStaticBucket, ok := oldBucket.(StaticTokenBucket) + if ok { + oldSize := cap(oldStaticBucket.bucket) + migrateSize := oldSize + if size < migrateSize { + migrateSize = size + } + + bucket := make(chan struct{}, size) + for range size - migrateSize { + bucket <- struct{}{} + } + + if 
migrateSize != 0 { + go func() { + for range migrateSize { + <-oldStaticBucket.bucket + bucket <- struct{}{} + } + close(oldStaticBucket.bucket) + }() + } + return StaticTokenBucket{bucket: bucket} + } + } + return NewStaticTokenBucket(size) +} + +// Take channel maybe closed when local driver is modified. +// don't call Put method after the channel is closed. func (b StaticTokenBucket) Take() <-chan struct{} { return b.bucket } @@ -35,8 +67,10 @@ func (b StaticTokenBucket) Do(ctx context.Context, f func() error) error { select { case <-ctx.Done(): return ctx.Err() - case <-b.bucket: - defer b.Put() + case _, ok := <-b.Take(): + if ok { + defer b.Put() + } } return f() } From ce0b99a510c227a27c34f2b442c5e0794d2488f3 Mon Sep 17 00:00:00 2001 From: Maxwell Davis <138968347+Unic96@users.noreply.github.com> Date: Fri, 1 Nov 2024 21:12:29 +0800 Subject: [PATCH 013/187] fix(cloudreve): path not exist when moving/copying files (#7432) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: 马建军 <1432318228@qq.com> --- drivers/cloudreve/driver.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/cloudreve/driver.go b/drivers/cloudreve/driver.go index dc6d1b13..ec0f6ef2 100644 --- a/drivers/cloudreve/driver.go +++ b/drivers/cloudreve/driver.go @@ -4,6 +4,7 @@ import ( "context" "io" "net/http" + "path" "strconv" "strings" @@ -90,7 +91,7 @@ func (d *Cloudreve) MakeDir(ctx context.Context, parentDir model.Obj, dirName st func (d *Cloudreve) Move(ctx context.Context, srcObj, dstDir model.Obj) error { body := base.Json{ "action": "move", - "src_dir": srcObj.GetPath(), + "src_dir": path.Dir(srcObj.GetPath()), "dst": dstDir.GetPath(), "src": convertSrc(srcObj), } @@ -112,7 +113,7 @@ func (d *Cloudreve) Rename(ctx context.Context, srcObj model.Obj, newName string func (d *Cloudreve) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { body := base.Json{ - "src_dir": srcObj.GetPath(), + "src_dir": 
path.Dir(srcObj.GetPath()), "dst": dstDir.GetPath(), "src": convertSrc(srcObj), } From d0cda62703f48d45abebd25506fa11e0eea54a24 Mon Sep 17 00:00:00 2001 From: UUBulb <35923940+uubulb@users.noreply.github.com> Date: Fri, 1 Nov 2024 21:37:53 +0800 Subject: [PATCH 014/187] ci: add freebsd release build (#7344) --- .github/workflows/release_freebsd.yml | 35 +++++++++++++++++++++++++++ build.sh | 31 ++++++++++++++++++++++++ 2 files changed, 66 insertions(+) create mode 100644 .github/workflows/release_freebsd.yml diff --git a/.github/workflows/release_freebsd.yml b/.github/workflows/release_freebsd.yml new file mode 100644 index 00000000..46afb326 --- /dev/null +++ b/.github/workflows/release_freebsd.yml @@ -0,0 +1,35 @@ +name: release_freebsd + +on: + release: + types: [ published ] + +jobs: + release_freebsd: + strategy: + matrix: + platform: [ ubuntu-latest ] + go-version: [ '1.21' ] + name: Release + runs-on: ${{ matrix.platform }} + steps: + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Build + run: | + bash build.sh release freebsd + + - name: Upload assets + uses: softprops/action-gh-release@v2 + with: + tag_name: dev + files: build/compress/* diff --git a/build.sh b/build.sh index 6b28847c..a87eabf4 100644 --- a/build.sh +++ b/build.sh @@ -233,6 +233,29 @@ BuildReleaseAndroid() { done } +BuildReleaseFreeBSD() { + rm -rf .git/ + mkdir -p "build/freebsd" + OS_ARCHES=(amd64 arm64 i386) + GO_ARCHES=(amd64 arm64 386) + CGO_ARGS=(x86_64-unknown-freebsd14.1 aarch64-unknown-freebsd14.1 i386-unknown-freebsd14.1) + for i in "${!OS_ARCHES[@]}"; do + os_arch=${OS_ARCHES[$i]} + cgo_cc="clang --target=${CGO_ARGS[$i]} --sysroot=/opt/freebsd/${os_arch}" + echo building for freebsd-${os_arch} + sudo mkdir -p "/opt/freebsd/${os_arch}" + wget -q https://download.freebsd.org/releases/${os_arch}/14.1-RELEASE/base.txz + sudo tar -xf ./base.txz -C 
/opt/freebsd/${os_arch} + rm base.txz + export GOOS=freebsd + export GOARCH=${GO_ARCHES[$i]} + export CC=${cgo_cc} + export CGO_ENABLED=1 + export CGO_LDFLAGS="-fuse-ld=lld" + go build -o ./build/$appName-freebsd-$os_arch -ldflags="$ldflags" -tags=jsoniter . + done +} + MakeRelease() { cd build mkdir compress @@ -251,6 +274,11 @@ MakeRelease() { tar -czvf compress/"$i".tar.gz alist rm -f alist done + for i in $(find . -type f -name "$appName-freebsd-*"); do + cp "$i" alist + tar -czvf compress/"$i".tar.gz alist + rm -f alist + done for i in $(find . -type f -name "$appName-windows-*"); do cp "$i" alist.exe zip compress/$(echo $i | sed 's/\.[^.]*$//').zip alist.exe @@ -288,6 +316,9 @@ elif [ "$1" = "release" ]; then elif [ "$2" = "android" ]; then BuildReleaseAndroid MakeRelease "md5-android.txt" + elif [ "$2" = "freebsd" ]; then + BuildReleaseFreeBSD + MakeRelease "md5-freebsd.txt" elif [ "$2" = "web" ]; then echo "web only" else From 10c7ebb1c0c917e848c53453f4644ef1f7b18922 Mon Sep 17 00:00:00 2001 From: Rirmach Date: Fri, 1 Nov 2024 23:31:33 +0800 Subject: [PATCH 015/187] fix(local): cross-device file move (#7430) --- drivers/local/driver.go | 36 ++++++++++++++++++++++-------------- go.mod | 1 + go.sum | 2 ++ 3 files changed, 25 insertions(+), 14 deletions(-) diff --git a/drivers/local/driver.go b/drivers/local/driver.go index 86980943..c39cec10 100644 --- a/drivers/local/driver.go +++ b/drivers/local/driver.go @@ -22,6 +22,7 @@ import ( "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" "github.com/alist-org/times" + cp "github.com/otiai10/copy" log "github.com/sirupsen/logrus" _ "golang.org/x/image/webp" ) @@ -241,11 +242,22 @@ func (d *Local) Move(ctx context.Context, srcObj, dstDir model.Obj) error { if utils.IsSubPath(srcPath, dstPath) { return fmt.Errorf("the destination folder is a subfolder of the source folder") } - err := os.Rename(srcPath, dstPath) - if err != nil { + if err := os.Rename(srcPath, dstPath); err != 
nil && strings.Contains(err.Error(), "invalid cross-device link") { + // Handle cross-device file move in local driver + if err = d.Copy(ctx, srcObj, dstDir); err != nil { + return err + } else { + // Directly remove file without check recycle bin if successfully copied + if srcObj.IsDir() { + err = os.RemoveAll(srcObj.GetPath()) + } else { + err = os.Remove(srcObj.GetPath()) + } + return err + } + } else { return err } - return nil } func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) error { @@ -258,22 +270,18 @@ func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) er return nil } -func (d *Local) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { +func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error { srcPath := srcObj.GetPath() dstPath := filepath.Join(dstDir.GetPath(), srcObj.GetName()) if utils.IsSubPath(srcPath, dstPath) { return fmt.Errorf("the destination folder is a subfolder of the source folder") } - var err error - if srcObj.IsDir() { - err = utils.CopyDir(srcPath, dstPath) - } else { - err = utils.CopyFile(srcPath, dstPath) - } - if err != nil { - return err - } - return nil + // Copy using otiai10/copy to perform more secure & efficient copy + return cp.Copy(srcPath, dstPath, cp.Options{ + Sync: true, // Sync file to disk after copy, may have performance penalty in filesystem such as ZFS + PreserveTimes: true, + NumOfWorkers: 0, // Serialized copy without using goroutine + }) } func (d *Local) Remove(ctx context.Context, obj model.Obj) error { diff --git a/go.mod b/go.mod index 9b9d859d..45e2c643 100644 --- a/go.mod +++ b/go.mod @@ -189,6 +189,7 @@ require ( github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect + github.com/otiai10/copy v1.14.0 github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect diff --git a/go.sum b/go.sum index f4699bc2..420a259f 100644 --- a/go.sum +++ b/go.sum @@ -391,6 +391,8 @@ github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg= github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831 h1:K3T3eu4h5aYIOzUtLjN08L4Qt4WGaJONMgcaD0ayBJQ= github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831/go.mod h1:lSHD4lC4zlMl+zcoysdJcd5KFzsWwOD8BJbyg1Ws9Ng= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= From 64ceb5afb6ea94b0a71367c4d9cfa4a6a68dddc3 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Fri, 1 Nov 2024 23:32:26 +0800 Subject: [PATCH 016/187] feat: support general users view and cancel own tasks (#7416 close #7398) * feat: support general users view and cancel own tasks Add a creator attribute to the upload, copy and offline download tasks, so that a GENERAL task creator can view and cancel them. BREAKING CHANGE: 1. A new internal package `task` including the struct `TaskWithCreator` which embeds `tache.Base` is created, and the past dependence on `tache.Task` will all be transferred to dependence on this package. 2. The API `/admin/task` can now also be accessed via `/task`, and the old endpoint is retained to ensure compatibility with legacy automation scripts. 
Closes #7398 * fix(deps): update github.com/xhofe/tache to v0.1.3 --- go.mod | 2 +- go.sum | 2 + internal/fs/copy.go | 12 +- internal/fs/fs.go | 8 +- internal/fs/put.go | 9 +- internal/offline_download/tool/add.go | 11 +- internal/offline_download/tool/download.go | 6 +- internal/offline_download/tool/transfer.go | 3 +- internal/task/base.go | 26 ++++ server/handles/fsmanage.go | 4 +- server/handles/fsup.go | 13 +- server/handles/offline_download.go | 4 +- server/handles/task.go | 166 ++++++++++++++++----- server/middlewares/auth.go | 10 ++ server/router.go | 9 +- 15 files changed, 217 insertions(+), 68 deletions(-) create mode 100644 internal/task/base.go diff --git a/go.mod b/go.mod index 45e2c643..19bc7c2e 100644 --- a/go.mod +++ b/go.mod @@ -56,7 +56,7 @@ require ( github.com/u2takey/ffmpeg-go v0.5.0 github.com/upyun/go-sdk/v3 v3.0.4 github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 - github.com/xhofe/tache v0.1.2 + github.com/xhofe/tache v0.1.3 github.com/xhofe/wopan-sdk-go v0.1.3 github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22 golang.org/x/crypto v0.27.0 diff --git a/go.sum b/go.sum index 420a259f..78ac273a 100644 --- a/go.sum +++ b/go.sum @@ -514,6 +514,8 @@ github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3K github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0= github.com/xhofe/tache v0.1.2 h1:pHrXlrWcbTb4G7hVUDW7Rc+YTUnLJvnLBrdktVE1Fqg= github.com/xhofe/tache v0.1.2/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ= +github.com/xhofe/tache v0.1.3 h1:MipxzlljYX29E1YI/SLC7hVomVF+51iP1OUzlsuq1wE= +github.com/xhofe/tache v0.1.3/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ= github.com/xhofe/wopan-sdk-go v0.1.3 h1:J58X6v+n25ewBZjb05pKOr7AWGohb+Rdll4CThGh6+A= github.com/xhofe/wopan-sdk-go v0.1.3/go.mod h1:dcY9yA28fnaoZPnXZiVTFSkcd7GnIPTpTIIlfSI5z5Q= github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/internal/fs/copy.go b/internal/fs/copy.go index 38407c9a..d4ad452b 100644 --- a/internal/fs/copy.go +++ b/internal/fs/copy.go @@ -11,13 +11,14 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/internal/task" "github.com/alist-org/alist/v3/pkg/utils" "github.com/pkg/errors" "github.com/xhofe/tache" ) type CopyTask struct { - tache.Base + task.TaskWithCreator Status string `json:"-"` //don't save status to save space SrcObjPath string `json:"src_path"` DstDirPath string `json:"dst_path"` @@ -53,7 +54,7 @@ var CopyTaskManager *tache.Manager[*CopyTask] // Copy if in the same storage, call move method // if not, add copy task -func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (tache.TaskWithInfo, error) { +func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskInfoWithCreator, error) { srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(srcObjPath) if err != nil { return nil, errors.WithMessage(err, "failed get src storage") @@ -92,7 +93,11 @@ func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool } } // not in the same storage + taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed t := &CopyTask{ + TaskWithCreator: task.TaskWithCreator{ + Creator: taskCreator, + }, srcStorage: srcStorage, dstStorage: dstStorage, SrcObjPath: srcObjActualPath, @@ -123,6 +128,9 @@ func copyBetween2Storages(t *CopyTask, srcStorage, dstStorage driver.Driver, src srcObjPath := stdpath.Join(srcObjPath, obj.GetName()) dstObjPath := stdpath.Join(dstDirPath, srcObj.GetName()) CopyTaskManager.Add(&CopyTask{ + TaskWithCreator: task.TaskWithCreator{ + Creator: t.Creator, + }, srcStorage: srcStorage, dstStorage: dstStorage, SrcObjPath: srcObjPath, diff --git 
a/internal/fs/fs.go b/internal/fs/fs.go index 23e8a87a..65e5a2c2 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -5,8 +5,8 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/task" log "github.com/sirupsen/logrus" - "github.com/xhofe/tache" ) // the param named path of functions in this package is a mount path @@ -69,7 +69,7 @@ func Move(ctx context.Context, srcPath, dstDirPath string, lazyCache ...bool) er return err } -func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (tache.TaskWithInfo, error) { +func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskInfoWithCreator, error) { res, err := _copy(ctx, srcObjPath, dstDirPath, lazyCache...) if err != nil { log.Errorf("failed copy %s to %s: %+v", srcObjPath, dstDirPath, err) @@ -101,8 +101,8 @@ func PutDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer return err } -func PutAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, error) { - t, err := putAsTask(dstDirPath, file) +func PutAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskInfoWithCreator, error) { + t, err := putAsTask(ctx, dstDirPath, file) if err != nil { log.Errorf("failed put %s: %+v", dstDirPath, err) } diff --git a/internal/fs/put.go b/internal/fs/put.go index 807b15e0..23197f5b 100644 --- a/internal/fs/put.go +++ b/internal/fs/put.go @@ -7,12 +7,13 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/task" "github.com/pkg/errors" "github.com/xhofe/tache" ) type UploadTask struct { - tache.Base + task.TaskWithCreator storage driver.Driver dstDirActualPath string file model.FileStreamer @@ -33,7 +34,7 @@ func (t *UploadTask) Run() 
error { var UploadTaskManager *tache.Manager[*UploadTask] // putAsTask add as a put task and return immediately -func putAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, error) { +func putAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskInfoWithCreator, error) { storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath) if err != nil { return nil, errors.WithMessage(err, "failed get storage") @@ -49,7 +50,11 @@ func putAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, //file.SetReader(tempFile) //file.SetTmpFile(tempFile) } + taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed t := &UploadTask{ + TaskWithCreator: task.TaskWithCreator{ + Creator: taskCreator, + }, storage: storage, dstDirActualPath: dstDirActualPath, file: file, diff --git a/internal/offline_download/tool/add.go b/internal/offline_download/tool/add.go index c7c5c781..1c9da146 100644 --- a/internal/offline_download/tool/add.go +++ b/internal/offline_download/tool/add.go @@ -2,6 +2,8 @@ package tool import ( "context" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/task" "path/filepath" "github.com/alist-org/alist/v3/internal/conf" @@ -9,7 +11,6 @@ import ( "github.com/alist-org/alist/v3/internal/op" "github.com/google/uuid" "github.com/pkg/errors" - "github.com/xhofe/tache" ) type DeletePolicy string @@ -28,7 +29,7 @@ type AddURLArgs struct { DeletePolicy DeletePolicy } -func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) { +func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskInfoWithCreator, error) { // get tool tool, err := Tools.Get(args.Tool) if err != nil { @@ -77,8 +78,12 @@ func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) { // 防止将下载好的文件删除 deletePolicy = DeleteNever } - + + taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert 
failed t := &DownloadTask{ + TaskWithCreator: task.TaskWithCreator{ + Creator: taskCreator, + }, Url: args.URL, DstDirPath: args.DstDirPath, TempDir: tempDir, diff --git a/internal/offline_download/tool/download.go b/internal/offline_download/tool/download.go index ef9ceabf..038baf96 100644 --- a/internal/offline_download/tool/download.go +++ b/internal/offline_download/tool/download.go @@ -7,13 +7,14 @@ import ( "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/internal/task" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/xhofe/tache" ) type DownloadTask struct { - tache.Base + task.TaskWithCreator Url string `json:"url"` DstDirPath string `json:"dst_dir_path"` TempDir string `json:"temp_dir"` @@ -171,6 +172,9 @@ func (t *DownloadTask) Complete() error { for i := range files { file := files[i] TransferTaskManager.Add(&TransferTask{ + TaskWithCreator: task.TaskWithCreator{ + Creator: t.Creator, + }, file: file, DstDirPath: t.DstDirPath, TempDir: t.TempDir, diff --git a/internal/offline_download/tool/transfer.go b/internal/offline_download/tool/transfer.go index 3744c7b5..085b4a66 100644 --- a/internal/offline_download/tool/transfer.go +++ b/internal/offline_download/tool/transfer.go @@ -8,6 +8,7 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/internal/task" "github.com/alist-org/alist/v3/pkg/utils" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -15,7 +16,7 @@ import ( ) type TransferTask struct { - tache.Base + task.TaskWithCreator FileDir string `json:"file_dir"` DstDirPath string `json:"dst_dir_path"` TempDir string `json:"temp_dir"` diff --git a/internal/task/base.go b/internal/task/base.go new file mode 100644 index 00000000..a30e5987 --- /dev/null +++ 
b/internal/task/base.go @@ -0,0 +1,26 @@ +package task + +import ( + "github.com/alist-org/alist/v3/internal/model" + "github.com/xhofe/tache" +) + +type TaskWithCreator struct { + tache.Base + Creator *model.User +} + +func (t *TaskWithCreator) SetCreator(creator *model.User) { + t.Creator = creator + t.Persist() +} + +func (t *TaskWithCreator) GetCreator() *model.User { + return t.Creator +} + +type TaskInfoWithCreator interface { + tache.TaskWithInfo + SetCreator(creator *model.User) + GetCreator() *model.User +} diff --git a/server/handles/fsmanage.go b/server/handles/fsmanage.go index 3d446eda..42d53d7e 100644 --- a/server/handles/fsmanage.go +++ b/server/handles/fsmanage.go @@ -2,7 +2,7 @@ package handles import ( "fmt" - "github.com/xhofe/tache" + "github.com/alist-org/alist/v3/internal/task" "io" stdpath "path" @@ -121,7 +121,7 @@ func FsCopy(c *gin.Context) { common.ErrorResp(c, err, 403) return } - var addedTasks []tache.TaskWithInfo + var addedTasks []task.TaskInfoWithCreator for i, name := range req.Names { t, err := fs.Copy(c, stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1) if t != nil { diff --git a/server/handles/fsup.go b/server/handles/fsup.go index ef9baa11..3a366d49 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -1,17 +1,16 @@ package handles import ( - "github.com/xhofe/tache" + "github.com/alist-org/alist/v3/internal/task" "io" "net/url" stdpath "path" "strconv" "time" - "github.com/alist-org/alist/v3/internal/stream" - "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/server/common" "github.com/gin-gonic/gin" ) @@ -58,9 +57,9 @@ func FsStream(c *gin.Context) { Mimetype: c.GetHeader("Content-Type"), WebPutAsTask: asTask, } - var t tache.TaskWithInfo + var t task.TaskInfoWithCreator if asTask { - t, err = fs.PutAsTask(dir, s) + t, err = fs.PutAsTask(c, dir, s) } else { err = fs.PutDirectly(c, 
dir, s, true) } @@ -123,12 +122,12 @@ func FsForm(c *gin.Context) { Mimetype: file.Header.Get("Content-Type"), WebPutAsTask: asTask, } - var t tache.TaskWithInfo + var t task.TaskInfoWithCreator if asTask { s.Reader = struct { io.Reader }{f} - t, err = fs.PutAsTask(dir, &s) + t, err = fs.PutAsTask(c, dir, &s) } else { ss, err := stream.NewSeekableStream(s, nil) if err != nil { diff --git a/server/handles/offline_download.go b/server/handles/offline_download.go index 1c5f9555..ff1fcfa0 100644 --- a/server/handles/offline_download.go +++ b/server/handles/offline_download.go @@ -5,9 +5,9 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/offline_download/tool" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/task" "github.com/alist-org/alist/v3/server/common" "github.com/gin-gonic/gin" - "github.com/xhofe/tache" ) type SetAria2Req struct { @@ -133,7 +133,7 @@ func AddOfflineDownload(c *gin.Context) { common.ErrorResp(c, err, 403) return } - var tasks []tache.TaskWithInfo + var tasks []task.TaskInfoWithCreator for _, url := range req.Urls { t, err := tool.AddURL(c, &tool.AddURLArgs{ URL: url, diff --git a/server/handles/task.go b/server/handles/task.go index a8b4d21b..71b4c622 100644 --- a/server/handles/task.go +++ b/server/handles/task.go @@ -1,6 +1,8 @@ package handles import ( + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/task" "math" "github.com/alist-org/alist/v3/internal/fs" @@ -12,15 +14,17 @@ import ( ) type TaskInfo struct { - ID string `json:"id"` - Name string `json:"name"` - State tache.State `json:"state"` - Status string `json:"status"` - Progress float64 `json:"progress"` - Error string `json:"error"` + ID string `json:"id"` + Name string `json:"name"` + Creator string `json:"creator"` + CreatorRole int `json:"creator_role"` + State tache.State `json:"state"` + Status string `json:"status"` + Progress float64 
`json:"progress"` + Error string `json:"error"` } -func getTaskInfo[T tache.TaskWithInfo](task T) TaskInfo { +func getTaskInfo[T task.TaskInfoWithCreator](task T) TaskInfo { errMsg := "" if task.GetErr() != nil { errMsg = task.GetErr().Error() @@ -30,62 +34,142 @@ func getTaskInfo[T tache.TaskWithInfo](task T) TaskInfo { if math.IsNaN(progress) { progress = 100 } + creatorName := "" + creatorRole := -1 + if task.GetCreator() != nil { + creatorName = task.GetCreator().Username + creatorRole = task.GetCreator().Role + } return TaskInfo{ - ID: task.GetID(), - Name: task.GetName(), - State: task.GetState(), - Status: task.GetStatus(), - Progress: progress, - Error: errMsg, + ID: task.GetID(), + Name: task.GetName(), + Creator: creatorName, + CreatorRole: creatorRole, + State: task.GetState(), + Status: task.GetStatus(), + Progress: progress, + Error: errMsg, } } -func getTaskInfos[T tache.TaskWithInfo](tasks []T) []TaskInfo { +func getTaskInfos[T task.TaskInfoWithCreator](tasks []T) []TaskInfo { return utils.MustSliceConvert(tasks, getTaskInfo[T]) } -func taskRoute[T tache.TaskWithInfo](g *gin.RouterGroup, manager *tache.Manager[T]) { - g.GET("/undone", func(c *gin.Context) { - common.SuccessResp(c, getTaskInfos(manager.GetByState(tache.StatePending, tache.StateRunning, - tache.StateCanceling, tache.StateErrored, tache.StateFailing, tache.StateWaitingRetry, tache.StateBeforeRetry))) - }) - g.GET("/done", func(c *gin.Context) { - common.SuccessResp(c, getTaskInfos(manager.GetByState(tache.StateCanceled, tache.StateFailed, tache.StateSucceeded))) - }) - g.POST("/info", func(c *gin.Context) { - tid := c.Query("tid") - task, ok := manager.GetByID(tid) +func argsContains[T comparable](v T, slice ...T) bool { + return utils.SliceContains(slice, v) +} + +func getUserInfo(c *gin.Context) (bool, uint, bool) { + if user, ok := c.Value("user").(*model.User); ok { + return user.IsAdmin(), user.ID, true + } else { + return false, 0, false + } +} + +func getTargetedHandler[T 
task.TaskInfoWithCreator](manager *tache.Manager[T], callback func(c *gin.Context, task T)) gin.HandlerFunc { + return func(c *gin.Context) { + isAdmin, uid, ok := getUserInfo(c) + if !ok { + // if there is no bug, here is unreachable + common.ErrorStrResp(c, "user invalid", 401) + return + } + t, ok := manager.GetByID(c.Query("tid")) if !ok { common.ErrorStrResp(c, "task not found", 404) return } + if !isAdmin && uid != t.GetCreator().ID { + // to avoid an attacker using error messages to guess valid TID, return a 404 rather than a 403 + common.ErrorStrResp(c, "task not found", 404) + return + } + callback(c, t) + } +} + +func taskRoute[T task.TaskInfoWithCreator](g *gin.RouterGroup, manager *tache.Manager[T]) { + g.GET("/undone", func(c *gin.Context) { + isAdmin, uid, ok := getUserInfo(c) + if !ok { + // if there is no bug, here is unreachable + common.ErrorStrResp(c, "user invalid", 401) + return + } + common.SuccessResp(c, getTaskInfos(manager.GetByCondition(func(task T) bool { + // avoid directly passing the user object into the function to reduce closure size + return (isAdmin || uid == task.GetCreator().ID) && + argsContains(task.GetState(), tache.StatePending, tache.StateRunning, tache.StateCanceling, + tache.StateErrored, tache.StateFailing, tache.StateWaitingRetry, tache.StateBeforeRetry) + }))) + }) + g.GET("/done", func(c *gin.Context) { + isAdmin, uid, ok := getUserInfo(c) + if !ok { + // if there is no bug, here is unreachable + common.ErrorStrResp(c, "user invalid", 401) + return + } + common.SuccessResp(c, getTaskInfos(manager.GetByCondition(func(task T) bool { + return (isAdmin || uid == task.GetCreator().ID) && + argsContains(task.GetState(), tache.StateCanceled, tache.StateFailed, tache.StateSucceeded) + }))) + }) + g.POST("/info", getTargetedHandler(manager, func(c *gin.Context, task T) { common.SuccessResp(c, getTaskInfo(task)) - }) - g.POST("/cancel", func(c *gin.Context) { - tid := c.Query("tid") - manager.Cancel(tid) + })) + 
g.POST("/cancel", getTargetedHandler(manager, func(c *gin.Context, task T) { + manager.Cancel(task.GetID()) common.SuccessResp(c) - }) - g.POST("/delete", func(c *gin.Context) { - tid := c.Query("tid") - manager.Remove(tid) + })) + g.POST("/delete", getTargetedHandler(manager, func(c *gin.Context, task T) { + manager.Remove(task.GetID()) common.SuccessResp(c) - }) - g.POST("/retry", func(c *gin.Context) { - tid := c.Query("tid") - manager.Retry(tid) + })) + g.POST("/retry", getTargetedHandler(manager, func(c *gin.Context, task T) { + manager.Retry(task.GetID()) common.SuccessResp(c) - }) + })) g.POST("/clear_done", func(c *gin.Context) { - manager.RemoveByState(tache.StateCanceled, tache.StateFailed, tache.StateSucceeded) + isAdmin, uid, ok := getUserInfo(c) + if !ok { + // if there is no bug, here is unreachable + common.ErrorStrResp(c, "user invalid", 401) + return + } + manager.RemoveByCondition(func(task T) bool { + return (isAdmin || uid == task.GetCreator().ID) && + argsContains(task.GetState(), tache.StateCanceled, tache.StateFailed, tache.StateSucceeded) + }) common.SuccessResp(c) }) g.POST("/clear_succeeded", func(c *gin.Context) { - manager.RemoveByState(tache.StateSucceeded) + isAdmin, uid, ok := getUserInfo(c) + if !ok { + // if there is no bug, here is unreachable + common.ErrorStrResp(c, "user invalid", 401) + return + } + manager.RemoveByCondition(func(task T) bool { + return (isAdmin || uid == task.GetCreator().ID) && task.GetState() == tache.StateSucceeded + }) common.SuccessResp(c) }) g.POST("/retry_failed", func(c *gin.Context) { - manager.RetryAllFailed() + isAdmin, uid, ok := getUserInfo(c) + if !ok { + // if there is no bug, here is unreachable + common.ErrorStrResp(c, "user invalid", 401) + return + } + tasks := manager.GetByCondition(func(task T) bool { + return (isAdmin || uid == task.GetCreator().ID) && task.GetState() == tache.StateFailed + }) + for _, t := range tasks { + manager.Retry(t.GetID()) + } common.SuccessResp(c) }) } diff --git 
a/server/middlewares/auth.go b/server/middlewares/auth.go index 14f186be..d65d1ad6 100644 --- a/server/middlewares/auth.go +++ b/server/middlewares/auth.go @@ -127,6 +127,16 @@ func Authn(c *gin.Context) { c.Next() } +func AuthNotGuest(c *gin.Context) { + user := c.MustGet("user").(*model.User) + if user.IsGuest() { + common.ErrorStrResp(c, "You are a guest", 403) + c.Abort() + } else { + c.Next() + } +} + func AuthAdmin(c *gin.Context) { user := c.MustGet("user").(*model.User) if !user.IsAdmin() { diff --git a/server/router.go b/server/router.go index 07423f92..fffa840e 100644 --- a/server/router.go +++ b/server/router.go @@ -76,6 +76,7 @@ func Init(e *gin.Engine) { public.Any("/offline_download_tools", handles.OfflineDownloadTools) _fs(auth.Group("/fs")) + _task(auth.Group("/task", middlewares.AuthNotGuest)) admin(auth.Group("/admin", middlewares.AuthAdmin)) if flags.Debug || flags.Dev { debug(g.Group("/debug")) @@ -127,8 +128,8 @@ func admin(g *gin.RouterGroup) { setting.POST("/set_qbit", handles.SetQbittorrent) setting.POST("/set_transmission", handles.SetTransmission) - task := g.Group("/task") - handles.SetupTaskRoute(task) + // retain /admin/task API to ensure compatibility with legacy automation scripts + _task(g.Group("/task")) ms := g.Group("/message") ms.POST("/get", message.HttpInstance.GetHandle) @@ -166,6 +167,10 @@ func _fs(g *gin.RouterGroup) { g.POST("/add_offline_download", handles.AddOfflineDownload) } +func _task(g *gin.RouterGroup) { + handles.SetupTaskRoute(g) +} + func Cors(r *gin.Engine) { config := cors.DefaultConfig() // config.AllowAllOrigins = true From b803b0070ecde83f2868b15902c34fe9543fb2e2 Mon Sep 17 00:00:00 2001 From: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Sat, 2 Nov 2024 16:41:33 +0800 Subject: [PATCH 017/187] fix(115): 20GB file upload restriction (#7452) * fix(115): multipart upload error * feat(115): Modify default page size * fix(115): Replace temporary repair scheme --- drivers/115/meta.go | 2 +- 
drivers/115/types.go | 18 +++++++++++++- drivers/115/util.go | 51 +++++++++++++++------------------------ drivers/115_share/meta.go | 2 +- 4 files changed, 38 insertions(+), 35 deletions(-) diff --git a/drivers/115/meta.go b/drivers/115/meta.go index 38c1742a..d9526775 100644 --- a/drivers/115/meta.go +++ b/drivers/115/meta.go @@ -9,7 +9,7 @@ type Addition struct { Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"` QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"` QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"` - PageSize int64 `json:"page_size" type:"number" default:"56" help:"list api per page size of 115 driver"` + PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"` LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"` driver.RootID } diff --git a/drivers/115/types.go b/drivers/115/types.go index 830e347b..40b951d8 100644 --- a/drivers/115/types.go +++ b/drivers/115/types.go @@ -1,10 +1,11 @@ package _115 import ( + "time" + "github.com/SheltonZhu/115driver/pkg/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" - "time" ) var _ model.Obj = (*FileObj)(nil) @@ -20,3 +21,18 @@ func (f *FileObj) CreateTime() time.Time { func (f *FileObj) GetHash() utils.HashInfo { return utils.NewHashInfo(utils.SHA1, f.Sha1) } + +type UploadResult struct { + driver.BasicResp + Data struct { + PickCode string `json:"pick_code"` + FileSize int `json:"file_size"` + FileID string `json:"file_id"` + ThumbURL string `json:"thumb_url"` + Sha1 string `json:"sha1"` + Aid int `json:"aid"` + FileName string `json:"file_name"` + Cid string `json:"cid"` + IsVideo int `json:"is_video"` + } `json:"data"` +} diff --git 
a/drivers/115/util.go b/drivers/115/util.go index 7d5889af..381ef0bd 100644 --- a/drivers/115/util.go +++ b/drivers/115/util.go @@ -10,7 +10,6 @@ import ( "io" "net/http" "net/url" - "path/filepath" "strconv" "strings" "sync" @@ -254,6 +253,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i ossClient *oss.Client bucket *oss.Bucket ossToken *driver115.UploadOSSTokenResp + bodyBytes []byte err error ) @@ -268,12 +268,14 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i f(options) } } + // oss 启用Sequential必须按顺序上传 + options.ThreadsNum = 1 if ossToken, err = d.client.GetOSSToken(); err != nil { return err } - if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil { + if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret, oss.EnableMD5(true), oss.EnableCRC(true)); err != nil { return err } @@ -294,6 +296,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i if imur, err = bucket.InitiateMultipartUpload(params.Object, oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken), oss.UserAgentHeader(driver115.OSSUserAgent), + oss.EnableSha1(), oss.Sequential(), ); err != nil { return err } @@ -337,8 +340,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i continue } - b := bytes.NewBuffer(buf) - if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil { + if part, err = bucket.UploadPart(imur, bytes.NewBuffer(buf), chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil { break } } @@ -373,14 +375,20 @@ LOOP: } } - // EOF错误是xml的Unmarshal导致的,响应其实是json格式,所以实际上上传是成功的 - if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) { - // 当文件名含有 &< 
这两个字符之一时响应的xml解析会出现错误,实际上上传是成功的 - if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") { - return err - } + // 不知道啥原因,oss那边分片上传不计算sha1,导致115服务器校验错误 + // params.Callback.Callback = strings.ReplaceAll(params.Callback.Callback, "${sha1}", params.SHA1) + if _, err := bucket.CompleteMultipartUpload(imur, parts, append( + driver115.OssOption(params, ossToken), + oss.CallbackResult(&bodyBytes), + )...); err != nil { + return err } - return d.checkUploadStatus(dirID, params.SHA1) + + var uploadResult UploadResult + if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil { + return err + } + return uploadResult.Err(string(bodyBytes)) } func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) { @@ -389,27 +397,6 @@ func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) { } } -func (d *Pan115) checkUploadStatus(dirID, sha1 string) error { - // 验证上传是否成功 - req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8") - opts := []driver115.GetFileOptions{ - driver115.WithOrder(driver115.FileOrderByTime), - driver115.WithShowDirEnable(false), - driver115.WithAsc(false), - driver115.WithLimit(500), - } - fResp, err := driver115.GetFiles(req, dirID, opts...) 
- if err != nil { - return err - } - for _, fileInfo := range fResp.Files { - if fileInfo.Sha1 == sha1 { - return nil - } - } - return driver115.ErrUploadFailed -} - func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) { for i := int64(1); i < 10; i++ { if fileSize < i*utils.GB { // 文件大小小于iGB时分为i*1000片 diff --git a/drivers/115_share/meta.go b/drivers/115_share/meta.go index 1d203b24..3fcc7b92 100644 --- a/drivers/115_share/meta.go +++ b/drivers/115_share/meta.go @@ -9,7 +9,7 @@ type Addition struct { Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"` QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"` QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"` - PageSize int64 `json:"page_size" type:"number" default:"20" help:"list api per page size of 115 driver"` + PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"` LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"` ShareCode string `json:"share_code" type:"text" required:"true" help:"share code of 115 share link"` ReceiveCode string `json:"receive_code" type:"text" required:"true" help:"receive code of 115 share link"` From e707fa38f1fce5c92922686ac421b37eb2173c23 Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Sat, 2 Nov 2024 17:05:00 +0800 Subject: [PATCH 018/187] ci: remove specific tag for freebsd action --- .github/workflows/release_freebsd.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/release_freebsd.yml b/.github/workflows/release_freebsd.yml index 46afb326..70dcecb1 100644 --- a/.github/workflows/release_freebsd.yml +++ b/.github/workflows/release_freebsd.yml @@ -31,5 +31,4 @@ jobs: - name: Upload assets uses: softprops/action-gh-release@v2 
with: - tag_name: dev files: build/compress/* From 2671c876f1fe7e6de1a3939c9ee5f588d9b5f41b Mon Sep 17 00:00:00 2001 From: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Sat, 2 Nov 2024 21:08:19 +0800 Subject: [PATCH 019/187] revert: "fix(115): enforce 20GB file size limit on uploadev" This reverts commit 216e3909f3946eb9c1b786c0d82c00f278f0ea25. --- drivers/115/driver.go | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/115/driver.go b/drivers/115/driver.go index 4857c1ec..f6fb6b05 100644 --- a/drivers/115/driver.go +++ b/drivers/115/driver.go @@ -2,7 +2,6 @@ package _115 import ( "context" - "fmt" "strings" "sync" @@ -122,10 +121,7 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr if err := d.WaitLimit(ctx); err != nil { return err } - if stream.GetSize() > utils.GB*20 { // TODO 由于官方分片上传接口失效,所以使用普通上传小于20GB的文件 - return fmt.Errorf("unsupported file size: 20GB limit exceeded") - } - // 分片上传 + var ( fastInfo *driver115.UploadInitResp dirID = dstDir.GetID() @@ -181,13 +177,11 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr } // 闪传失败,上传 - // if stream.GetSize() <= utils.KB{ // 文件大小小于1KB,改用普通模式上传 - if stream.GetSize() <= utils.GB*20 { // TODO 由于官方分片上传接口失效,所以使用普通上传小于20GB的文件 + if stream.GetSize() <= utils.KB { // 文件大小小于1KB,改用普通模式上传 return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID) } - return driver115.ErrUnexpected // 分片上传 - // return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID) + return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID) } func (d *Pan115) OfflineList(ctx context.Context) ([]*driver115.OfflineTask, error) { From f58de9923a75ed21009debe464fec867c854bbfa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=AE=89=E7=A8=B3?= Date: Fri, 8 Nov 2024 22:07:35 +0800 Subject: [PATCH 020/187] refactor(aliyunopen,config): Modify default properties (#7476) --- 
drivers/aliyundrive_open/meta.go | 2 +- internal/conf/config.go | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/aliyundrive_open/meta.go b/drivers/aliyundrive_open/meta.go index de9b45e0..58013143 100644 --- a/drivers/aliyundrive_open/meta.go +++ b/drivers/aliyundrive_open/meta.go @@ -6,7 +6,7 @@ import ( ) type Addition struct { - DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"default"` + DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"resource"` driver.RootID RefreshToken string `json:"refresh_token" required:"true"` OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"` diff --git a/internal/conf/config.go b/internal/conf/config.go index c5dc9c52..aa29e1f5 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -131,22 +131,22 @@ func DefaultConfig() *Config { TlsInsecureSkipVerify: true, Tasks: TasksConfig{ Download: TaskConfig{ - Workers: 5, - MaxRetry: 1, - TaskPersistant: true, + Workers: 5, + MaxRetry: 1, + // TaskPersistant: true, }, Transfer: TaskConfig{ - Workers: 5, - MaxRetry: 2, - TaskPersistant: true, + Workers: 5, + MaxRetry: 2, + // TaskPersistant: true, }, Upload: TaskConfig{ Workers: 5, }, Copy: TaskConfig{ - Workers: 5, - MaxRetry: 2, - TaskPersistant: true, + Workers: 5, + MaxRetry: 2, + // TaskPersistant: true, }, }, Cors: Cors{ From 67c93eed2b1e28e0425b93e5f3f0533de653cc70 Mon Sep 17 00:00:00 2001 From: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Fri, 8 Nov 2024 22:08:25 +0800 Subject: [PATCH 021/187] feat(baidu_netdisk,baidu_photo): add and fix hashinfo (#7469) --- drivers/baidu_netdisk/types.go | 3 +- drivers/baidu_netdisk/util.go | 57 ++++++++++++++++++++++++---------- drivers/baidu_photo/types.go | 2 +- drivers/baidu_photo/utils.go | 41 ++++++++++++++++++++++++ 4 files changed, 84 insertions(+), 19 deletions(-) diff --git 
a/drivers/baidu_netdisk/types.go b/drivers/baidu_netdisk/types.go index cbec0bcf..6f3bf13b 100644 --- a/drivers/baidu_netdisk/types.go +++ b/drivers/baidu_netdisk/types.go @@ -6,6 +6,7 @@ import ( "time" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" ) type TokenErrResp struct { @@ -72,7 +73,7 @@ func fileToObj(f File) *model.ObjThumb { IsFolder: f.Isdir == 1, // 直接获取的MD5是错误的 - // HashInfo: utils.NewHashInfo(utils.MD5, f.Md5), + HashInfo: utils.NewHashInfo(utils.MD5, DecryptMd5(f.Md5)), }, Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3}, } diff --git a/drivers/baidu_netdisk/util.go b/drivers/baidu_netdisk/util.go index ac1f06e8..ca1a6805 100644 --- a/drivers/baidu_netdisk/util.go +++ b/drivers/baidu_netdisk/util.go @@ -1,11 +1,14 @@ package baidu_netdisk import ( + "encoding/hex" "errors" "fmt" "net/http" "strconv" + "strings" "time" + "unicode" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/errs" @@ -153,8 +156,6 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model u = res.Header().Get("location") //} - updateObjMd5(file, "pan.baidu.com", u) - return &model.Link{ URL: u, Header: http.Header{ @@ -178,8 +179,6 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li return nil, err } - updateObjMd5(file, d.CustomCrackUA, resp.Info[0].Dlink) - return &model.Link{ URL: resp.Info[0].Dlink, Header: http.Header{ @@ -229,19 +228,6 @@ func joinTime(form map[string]string, ctime, mtime int64) { form["local_ctime"] = strconv.FormatInt(ctime, 10) } -func updateObjMd5(obj model.Obj, userAgent, u string) { - object := model.GetRawObject(obj) - if object != nil { - req, _ := http.NewRequest(http.MethodHead, u, nil) - req.Header.Add("User-Agent", userAgent) - resp, _ := base.HttpClient.Do(req) - if resp != nil { - contentMd5 := resp.Header.Get("Content-Md5") - object.HashInfo = utils.NewHashInfo(utils.MD5, contentMd5) - } - } -} - const 
( DefaultSliceSize int64 = 4 * utils.MB VipSliceSize = 16 * utils.MB @@ -267,3 +253,40 @@ func (d *BaiduNetdisk) getSliceSize() int64 { // r = strings.ReplaceAll(r, "+", "%20") // return r // } + +func DecryptMd5(encryptMd5 string) string { + if _, err := hex.DecodeString(encryptMd5); err == nil { + return encryptMd5 + } + + var out strings.Builder + out.Grow(len(encryptMd5)) + for i, n := 0, int64(0); i < len(encryptMd5); i++ { + if i == 9 { + n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g') + } else { + n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64) + } + out.WriteString(strconv.FormatInt(n^int64(15&i), 16)) + } + + encryptMd5 = out.String() + return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24] +} + +func EncryptMd5(originalMd5 string) string { + reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24] + + var out strings.Builder + out.Grow(len(reversed)) + for i, n := 0, int64(0); i < len(reversed); i++ { + n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64) + n ^= int64(15 & i) + if i == 9 { + out.WriteRune(rune(n) + 'g') + } else { + out.WriteString(strconv.FormatInt(n, 16)) + } + } + return out.String() +} diff --git a/drivers/baidu_photo/types.go b/drivers/baidu_photo/types.go index 2bbacd30..0e5cbb2c 100644 --- a/drivers/baidu_photo/types.go +++ b/drivers/baidu_photo/types.go @@ -72,7 +72,7 @@ func (c *File) Thumb() string { } func (c *File) GetHash() utils.HashInfo { - return utils.NewHashInfo(utils.MD5, c.Md5) + return utils.NewHashInfo(utils.MD5, DecryptMd5(c.Md5)) } /*相册部分*/ diff --git a/drivers/baidu_photo/utils.go b/drivers/baidu_photo/utils.go index be0ed133..c8c5b7ee 100644 --- a/drivers/baidu_photo/utils.go +++ b/drivers/baidu_photo/utils.go @@ -2,8 +2,12 @@ package baiduphoto import ( "context" + "encoding/hex" "fmt" "net/http" + "strconv" + "strings" + "unicode" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/errs" @@ -476,3 +480,40 @@ func (d 
*BaiduPhoto) uInfo() (*UInfo, error) { } return &info, nil } + +func DecryptMd5(encryptMd5 string) string { + if _, err := hex.DecodeString(encryptMd5); err == nil { + return encryptMd5 + } + + var out strings.Builder + out.Grow(len(encryptMd5)) + for i, n := 0, int64(0); i < len(encryptMd5); i++ { + if i == 9 { + n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g') + } else { + n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64) + } + out.WriteString(strconv.FormatInt(n^int64(15&i), 16)) + } + + encryptMd5 = out.String() + return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24] +} + +func EncryptMd5(originalMd5 string) string { + reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24] + + var out strings.Builder + out.Grow(len(reversed)) + for i, n := 0, int64(0); i < len(reversed); i++ { + n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64) + n ^= int64(15 & i) + if i == 9 { + out.WriteRune(rune(n) + 'g') + } else { + out.WriteString(strconv.FormatInt(n, 16)) + } + } + return out.String() +} From 0a46979c519885465e586da009b70422382c84ac Mon Sep 17 00:00:00 2001 From: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Fri, 8 Nov 2024 22:08:50 +0800 Subject: [PATCH 022/187] feat(115): enhance cache (#7479) --- drivers/115/driver.go | 100 +++++++++++++++++++++++++++++++----------- drivers/115/meta.go | 2 +- drivers/115/util.go | 84 +++++++++++++++++++++++++++++------ 3 files changed, 146 insertions(+), 40 deletions(-) diff --git a/drivers/115/driver.go b/drivers/115/driver.go index f6fb6b05..4f584cd7 100644 --- a/drivers/115/driver.go +++ b/drivers/115/driver.go @@ -79,28 +79,60 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) return link, nil } -func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { +func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { if err := d.WaitLimit(ctx); 
err != nil { - return err + return nil, err } - if _, err := d.client.Mkdir(parentDir.GetID(), dirName); err != nil { - return err + + result := driver115.MkdirResp{} + form := map[string]string{ + "pid": parentDir.GetID(), + "cname": dirName, } - return nil + req := d.client.NewRequest(). + SetFormData(form). + SetResult(&result). + ForceContentType("application/json;charset=UTF-8") + + resp, err := req.Post(driver115.ApiDirAdd) + + err = driver115.CheckErr(err, &result, resp) + if err != nil { + return nil, err + } + f, err := d.getNewFile(result.FileID) + if err != nil { + return nil, nil + } + return f, nil } -func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) error { +func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { if err := d.WaitLimit(ctx); err != nil { - return err + return nil, err } - return d.client.Move(dstDir.GetID(), srcObj.GetID()) + if err := d.client.Move(dstDir.GetID(), srcObj.GetID()); err != nil { + return nil, err + } + f, err := d.getNewFile(srcObj.GetID()) + if err != nil { + return nil, nil + } + return f, nil } -func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) error { +func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { if err := d.WaitLimit(ctx); err != nil { - return err + return nil, err } - return d.client.Rename(srcObj.GetID(), newName) + if err := d.client.Rename(srcObj.GetID(), newName); err != nil { + return nil, err + } + f, err := d.getNewFile((srcObj.GetID())) + if err != nil { + return nil, nil + } + return f, nil } func (d *Pan115) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { @@ -117,9 +149,9 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error { return d.client.Delete(obj.GetID()) } -func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *Pan115) Put(ctx context.Context, dstDir 
model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { if err := d.WaitLimit(ctx); err != nil { - return err + return nil, err } var ( @@ -128,10 +160,10 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr ) if ok, err := d.client.UploadAvailable(); err != nil || !ok { - return err + return nil, err } if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit { - return driver115.ErrUploadTooLarge + return nil, driver115.ErrUploadTooLarge } //if digest, err = d.client.GetDigestResult(stream); err != nil { // return err @@ -144,22 +176,22 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr } reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize}) if err != nil { - return err + return nil, err } preHash, err := utils.HashReader(utils.SHA1, reader) if err != nil { - return err + return nil, err } preHash = strings.ToUpper(preHash) fullHash := stream.GetHash().GetHash(utils.SHA1) if len(fullHash) <= 0 { tmpF, err := stream.CacheFullInTempFile() if err != nil { - return err + return nil, err } fullHash, err = utils.HashFile(utils.SHA1, tmpF) if err != nil { - return err + return nil, err } } fullHash = strings.ToUpper(fullHash) @@ -168,20 +200,36 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr // note that 115 add timeout for rapid-upload, // and "sig invalid" err is thrown even when the hash is correct after timeout. 
if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil { - return err + return nil, err } if matched, err := fastInfo.Ok(); err != nil { - return err + return nil, err } else if matched { - return nil + f, err := d.getNewFileByPickCode(fastInfo.PickCode) + if err != nil { + return nil, nil + } + return f, nil } + var uploadResult *UploadResult // 闪传失败,上传 - if stream.GetSize() <= utils.KB { // 文件大小小于1KB,改用普通模式上传 - return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID) + if stream.GetSize() <= 10*utils.MB { // 文件大小小于10MB,改用普通模式上传 + if uploadResult, err = d.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID); err != nil { + return nil, err + } + } else { + // 分片上传 + if uploadResult, err = d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID); err != nil { + return nil, err + } } - // 分片上传 - return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID) + + file, err := d.getNewFile(uploadResult.Data.FileID) + if err != nil { + return nil, nil + } + return file, nil } func (d *Pan115) OfflineList(ctx context.Context) ([]*driver115.OfflineTask, error) { diff --git a/drivers/115/meta.go b/drivers/115/meta.go index d9526775..3b192291 100644 --- a/drivers/115/meta.go +++ b/drivers/115/meta.go @@ -10,7 +10,7 @@ type Addition struct { QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"` QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"` PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"` - LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"` + LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate ([limit]r/1s)"` 
driver.RootID } diff --git a/drivers/115/util.go b/drivers/115/util.go index 381ef0bd..33e34570 100644 --- a/drivers/115/util.go +++ b/drivers/115/util.go @@ -74,6 +74,34 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) { return res, nil } +func (d *Pan115) getNewFile(fileId string) (*FileObj, error) { + file, err := d.client.GetFile(fileId) + if err != nil { + return nil, err + } + return &FileObj{*file}, nil +} + +func (d *Pan115) getNewFileByPickCode(pickCode string) (*FileObj, error) { + result := driver115.GetFileInfoResponse{} + req := d.client.NewRequest(). + SetQueryParam("pick_code", pickCode). + ForceContentType("application/json;charset=UTF-8"). + SetResult(&result) + resp, err := req.Get(driver115.ApiFileInfo) + if err := driver115.CheckErr(err, &result, resp); err != nil { + return nil, err + } + if len(result.Files) == 0 { + return nil, errors.New("not get file info") + } + fileInfo := result.Files[0] + + f := &FileObj{} + f.From(fileInfo) + return f, nil +} + func (d *Pan115) getUA() string { return fmt.Sprintf("Mozilla/5.0 115Browser/%s", appVer) } @@ -244,8 +272,38 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri return } +// UploadByOSS use aliyun sdk to upload +func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dirID string) (*UploadResult, error) { + ossToken, err := c.client.GetOSSToken() + if err != nil { + return nil, err + } + ossClient, err := oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret) + if err != nil { + return nil, err + } + bucket, err := ossClient.Bucket(params.Bucket) + if err != nil { + return nil, err + } + + var bodyBytes []byte + if err = bucket.PutObject(params.Object, r, append( + driver115.OssOption(params, ossToken), + oss.CallbackResult(&bodyBytes), + )...); err != nil { + return nil, err + } + + var uploadResult UploadResult + if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil { + return nil, err + } + 
return &uploadResult, uploadResult.Err(string(bodyBytes)) +} + // UploadByMultipart upload by mutipart blocks -func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error { +func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) (*UploadResult, error) { var ( chunks []oss.FileChunk parts []oss.UploadPart @@ -259,7 +317,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i tmpF, err := stream.CacheFullInTempFile() if err != nil { - return err + return nil, err } options := driver115.DefalutUploadMultipartOptions() @@ -272,15 +330,15 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i options.ThreadsNum = 1 if ossToken, err = d.client.GetOSSToken(); err != nil { - return err + return nil, err } if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret, oss.EnableMD5(true), oss.EnableCRC(true)); err != nil { - return err + return nil, err } if bucket, err = ossClient.Bucket(params.Bucket); err != nil { - return err + return nil, err } // ossToken一小时后就会失效,所以每50分钟重新获取一次 @@ -290,7 +348,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i timeout := time.NewTimer(options.Timeout) if chunks, err = SplitFile(fileSize); err != nil { - return err + return nil, err } if imur, err = bucket.InitiateMultipartUpload(params.Object, @@ -298,7 +356,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i oss.UserAgentHeader(driver115.OSSUserAgent), oss.EnableSha1(), oss.Sequential(), ); err != nil { - return err + return nil, err } wg := sync.WaitGroup{} @@ -364,14 +422,14 @@ LOOP: case <-ticker.C: // 到时重新获取ossToken if ossToken, err = d.client.GetOSSToken(); err != nil { - return err + return nil, 
err } case <-quit: break LOOP case <-errCh: - return err + return nil, err case <-timeout.C: - return fmt.Errorf("time out") + return nil, fmt.Errorf("time out") } } @@ -381,14 +439,14 @@ LOOP: driver115.OssOption(params, ossToken), oss.CallbackResult(&bodyBytes), )...); err != nil { - return err + return nil, err } var uploadResult UploadResult if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil { - return err + return nil, err } - return uploadResult.Err(string(bodyBytes)) + return &uploadResult, uploadResult.Err(string(bodyBytes)) } func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) { From 6c38c5972da06aa2f5b0badaafd3f5fe6683fca9 Mon Sep 17 00:00:00 2001 From: Jason-Fly <869914918@qq.com> Date: Sat, 16 Nov 2024 13:18:49 +0800 Subject: [PATCH 023/187] fix(terabox): big file upload issue (#7498 close #7490) --- drivers/terabox/driver.go | 122 ++++++++++++++++++++------------------ drivers/terabox/types.go | 4 ++ drivers/terabox/util.go | 21 +++++++ 3 files changed, 88 insertions(+), 59 deletions(-) diff --git a/drivers/terabox/driver.go b/drivers/terabox/driver.go index 11db351b..362de69e 100644 --- a/drivers/terabox/driver.go +++ b/drivers/terabox/driver.go @@ -10,8 +10,6 @@ import ( "math" stdpath "path" "strconv" - "strings" - "time" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/pkg/utils" @@ -24,9 +22,9 @@ import ( type Terabox struct { model.Storage Addition - JsToken string + JsToken string url_domain_prefix string - base_url string + base_url string } func (d *Terabox) Config() driver.Config { @@ -145,52 +143,24 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt } log.Debugln(locateupload_resp) - tempFile, err := stream.CacheFullInTempFile() - if err != nil { - return err - } - var Default int64 = 4 * 1024 * 1024 - defaultByteData := make([]byte, Default) - count := int(math.Ceil(float64(stream.GetSize()) / float64(Default))) - // cal md5 - h1 := md5.New() - h2 := 
md5.New() - block_list := make([]string, 0) - left := stream.GetSize() - for i := 0; i < count; i++ { - byteSize := Default - var byteData []byte - if left < Default { - byteSize = left - byteData = make([]byte, byteSize) - } else { - byteData = defaultByteData - } - left -= byteSize - _, err = io.ReadFull(tempFile, byteData) - if err != nil { - return err - } - h1.Write(byteData) - h2.Write(byteData) - block_list = append(block_list, fmt.Sprintf("\"%s\"", hex.EncodeToString(h2.Sum(nil)))) - h2.Reset() - } - - _, err = tempFile.Seek(0, io.SeekStart) - if err != nil { - return err - } - + // precreate file rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName()) path := encodeURIComponent(rawPath) - block_list_str := fmt.Sprintf("[%s]", strings.Join(block_list, ",")) + + var precreateBlockListStr string + if stream.GetSize() > initialChunkSize { + precreateBlockListStr = `["5910a591dd8fc18c32a8f3df4fdc1761","a5fc157d78e6ad1c7e114b056c92821e"]` + } else { + precreateBlockListStr = `["5910a591dd8fc18c32a8f3df4fdc1761"]` + } + data := map[string]string{ - "path": rawPath, - "autoinit": "1", - "target_path": dstDir.GetPath(), - "block_list": block_list_str, - "local_mtime": strconv.FormatInt(time.Now().Unix(), 10), + "path": rawPath, + "autoinit": "1", + "target_path": dstDir.GetPath(), + "block_list": precreateBlockListStr, + "local_mtime": strconv.FormatInt(stream.ModTime().Unix(), 10), + "file_limit_switch_v34": "true", } var precreateResp PrecreateResp log.Debugln(data) @@ -206,6 +176,13 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt if precreateResp.ReturnType == 2 { return nil } + + // upload chunks + tempFile, err := stream.CacheFullInTempFile() + if err != nil { + return err + } + params := map[string]string{ "method": "upload", "path": path, @@ -215,24 +192,37 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt "channel": "dubox", "clienttype": "0", } - left = stream.GetSize() - for i, 
partseq := range precreateResp.BlockList { + + streamSize := stream.GetSize() + chunkSize := calculateChunkSize(streamSize) + chunkByteData := make([]byte, chunkSize) + count := int(math.Ceil(float64(streamSize) / float64(chunkSize))) + left := streamSize + uploadBlockList := make([]string, 0, count) + h := md5.New() + for partseq := 0; partseq < count; partseq++ { if utils.IsCanceled(ctx) { return ctx.Err() } - byteSize := Default + byteSize := chunkSize var byteData []byte - if left < Default { + if left >= chunkSize { + byteData = chunkByteData + } else { byteSize = left byteData = make([]byte, byteSize) - } else { - byteData = defaultByteData } left -= byteSize _, err = io.ReadFull(tempFile, byteData) if err != nil { return err } + + // calculate md5 + h.Write(byteData) + uploadBlockList = append(uploadBlockList, hex.EncodeToString(h.Sum(nil))) + h.Reset() + u := "https://" + locateupload_resp.Host + "/rest/2.0/pcs/superfile2" params["partseq"] = strconv.Itoa(partseq) res, err := base.RestyClient.R(). 
@@ -245,25 +235,39 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt return err } log.Debugln(res.String()) - if len(precreateResp.BlockList) > 0 { - up(float64(i) * 100 / float64(len(precreateResp.BlockList))) + if count > 0 { + up(float64(partseq) * 100 / float64(count)) } } + + // create file params = map[string]string{ "isdir": "0", "rtype": "1", } + + uploadBlockListStr, err := utils.Json.MarshalToString(uploadBlockList) + if err != nil { + return err + } data = map[string]string{ "path": rawPath, "size": strconv.FormatInt(stream.GetSize(), 10), "uploadid": precreateResp.Uploadid, "target_path": dstDir.GetPath(), - "block_list": block_list_str, - "local_mtime": strconv.FormatInt(time.Now().Unix(), 10), + "block_list": uploadBlockListStr, + "local_mtime": strconv.FormatInt(stream.ModTime().Unix(), 10), } - res, err = d.post_form("/api/create", params, data, nil) + var createResp CreateResp + res, err = d.post_form("/api/create", params, data, &createResp) log.Debugln(string(res)) - return err + if err != nil { + return err + } + if createResp.Errno != 0 { + return fmt.Errorf("[terabox] failed to create file, errno: %d", createResp.Errno) + } + return nil } var _ driver.Driver = (*Terabox)(nil) diff --git a/drivers/terabox/types.go b/drivers/terabox/types.go index 8bdbc6fc..f4d50dde 100644 --- a/drivers/terabox/types.go +++ b/drivers/terabox/types.go @@ -99,3 +99,7 @@ type CheckLoginResp struct { type LocateUploadResp struct { Host string `json:"host"` } + +type CreateResp struct { + Errno int `json:"errno"` +} diff --git a/drivers/terabox/util.go b/drivers/terabox/util.go index e0f3d74e..002f80b5 100644 --- a/drivers/terabox/util.go +++ b/drivers/terabox/util.go @@ -17,6 +17,11 @@ import ( log "github.com/sirupsen/logrus" ) +const ( + initialChunkSize int64 = 4 << 20 // 4MB + initialSizeThreshold int64 = 4 << 30 // 4GB +) + func getStrBetween(raw, start, end string) string { regexPattern := fmt.Sprintf(`%s(.*?)%s`, 
regexp.QuoteMeta(start), regexp.QuoteMeta(end)) regex := regexp.MustCompile(regexPattern) @@ -258,3 +263,19 @@ func encodeURIComponent(str string) string { r = strings.ReplaceAll(r, "+", "%20") return r } + +func calculateChunkSize(streamSize int64) int64 { + chunkSize := initialChunkSize + sizeThreshold := initialSizeThreshold + + if streamSize < chunkSize { + return streamSize + } + + for streamSize > sizeThreshold { + chunkSize <<= 1 + sizeThreshold <<= 1 + } + + return chunkSize +} From c3c5843dce04450a4d03bca22758268895454798 Mon Sep 17 00:00:00 2001 From: Jason-Fly <869914918@qq.com> Date: Sat, 16 Nov 2024 13:19:59 +0800 Subject: [PATCH 024/187] fix(terabox): panic due to slice out of range (#7499 close #7487) --- drivers/terabox/util.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/terabox/util.go b/drivers/terabox/util.go index 002f80b5..058eecd6 100644 --- a/drivers/terabox/util.go +++ b/drivers/terabox/util.go @@ -91,11 +91,15 @@ func (d *Terabox) request(rurl string, method string, callback base.ReqCallback, return d.request(rurl, method, callback, resp, true) } } else if errno == -6 { - log.Debugln(res.Header()) - d.url_domain_prefix = res.Header()["Url-Domain-Prefix"][0] - d.base_url = "https://" + d.url_domain_prefix + ".terabox.com" - log.Debugln("Redirect base_url to", d.base_url) - return d.request(rurl, method, callback, resp, noRetry...) + header := res.Header() + log.Debugln(header) + urlDomainPrefix := header.Get("Url-Domain-Prefix") + if len(urlDomainPrefix) > 0 { + d.url_domain_prefix = urlDomainPrefix + d.base_url = "https://" + d.url_domain_prefix + ".terabox.com" + log.Debugln("Redirect base_url to", d.base_url) + return d.request(rurl, method, callback, resp, noRetry...) 
+ } } return res.Body(), nil } From 1c01dc683931f9a71b5f2bd97ce78cc1eecda9ff Mon Sep 17 00:00:00 2001 From: Jason-Fly <869914918@qq.com> Date: Sat, 16 Nov 2024 13:20:49 +0800 Subject: [PATCH 025/187] fix(storage): delete storage fails if a panic occurred during initialization (#7501) * fix(storage): store storages map when init storage panic * fix(drivers): add nil check to drop method --- drivers/chaoxing/driver.go | 4 +++- drivers/vtencent/drive.go | 4 +++- internal/op/storage.go | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/chaoxing/driver.go b/drivers/chaoxing/driver.go index de122c36..360c6e3d 100644 --- a/drivers/chaoxing/driver.go +++ b/drivers/chaoxing/driver.go @@ -67,7 +67,9 @@ func (d *ChaoXing) Init(ctx context.Context) error { } func (d *ChaoXing) Drop(ctx context.Context) error { - d.cron.Stop() + if d.cron != nil { + d.cron.Stop() + } return nil } diff --git a/drivers/vtencent/drive.go b/drivers/vtencent/drive.go index 67643143..36a91672 100644 --- a/drivers/vtencent/drive.go +++ b/drivers/vtencent/drive.go @@ -55,7 +55,9 @@ func (d *Vtencent) Init(ctx context.Context) error { } func (d *Vtencent) Drop(ctx context.Context) error { - d.cron.Stop() + if d.cron != nil { + d.cron.Stop() + } return nil } diff --git a/internal/op/storage.go b/internal/op/storage.go index 6790a8df..7d8831f5 100644 --- a/internal/op/storage.go +++ b/internal/op/storage.go @@ -101,7 +101,7 @@ func initStorage(ctx context.Context, storage model.Storage, storageDriver drive log.Errorf("panic init storage: %s", errInfo) driverStorage.SetStatus(errInfo) MustSaveDriverStorage(storageDriver) - storagesMap.Delete(driverStorage.MountPath) + storagesMap.Store(driverStorage.MountPath, storageDriver) } }() // Unmarshal Addition From a4ad98ee3e4647d2222d41cd22588fadc9a7535b Mon Sep 17 00:00:00 2001 From: BlueSkyXN <63384277+BlueSkyXN@users.noreply.github.com> Date: Sun, 17 Nov 2024 20:03:04 +0800 Subject: [PATCH 026/187] fix(pikpak): domain block and change 
to NET (#7350) --- drivers/pikpak/driver.go | 30 ++++++------- drivers/pikpak/util.go | 82 +++++++++++++++++----------------- drivers/pikpak_share/driver.go | 4 +- drivers/pikpak_share/util.go | 80 ++++++++++++++++----------------- 4 files changed, 98 insertions(+), 98 deletions(-) diff --git a/drivers/pikpak/driver.go b/drivers/pikpak/driver.go index 4208bb87..24de24d4 100644 --- a/drivers/pikpak/driver.go +++ b/drivers/pikpak/driver.go @@ -91,8 +91,8 @@ func (d *PikPak) Init(ctx context.Context) (err error) { ClientID: d.ClientID, ClientSecret: d.ClientSecret, Endpoint: oauth2.Endpoint{ - AuthURL: "https://user.mypikpak.com/v1/auth/signin", - TokenURL: "https://user.mypikpak.com/v1/auth/token", + AuthURL: "https://user.mypikpak.net/v1/auth/signin", + TokenURL: "https://user.mypikpak.net/v1/auth/token", AuthStyle: oauth2.AuthStyleInParams, }, } @@ -124,7 +124,7 @@ func (d *PikPak) Init(ctx context.Context) (err error) { } // 获取CaptchaToken - err = d.RefreshCaptchaTokenAtLogin(GetAction(http.MethodGet, "https://api-drive.mypikpak.com/drive/v1/files"), d.Common.GetUserID()) + err = d.RefreshCaptchaTokenAtLogin(GetAction(http.MethodGet, "https://api-drive.mypikpak.net/drive/v1/files"), d.Common.GetUserID()) if err != nil { return err } @@ -174,7 +174,7 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs) if !d.DisableMediaLink { queryParams["usage"] = "CACHE" } - _, err := d.request(fmt.Sprintf("https://api-drive.mypikpak.com/drive/v1/files/%s", file.GetID()), + _, err := d.request(fmt.Sprintf("https://api-drive.mypikpak.net/drive/v1/files/%s", file.GetID()), http.MethodGet, func(req *resty.Request) { req.SetQueryParams(queryParams) }, &resp) @@ -200,7 +200,7 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs) } func (d *PikPak) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { - _, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req 
*resty.Request) { + _, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "kind": "drive#folder", "parent_id": parentDir.GetID(), @@ -211,7 +211,7 @@ func (d *PikPak) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin } func (d *PikPak) Move(ctx context.Context, srcObj, dstDir model.Obj) error { - _, err := d.request("https://api-drive.mypikpak.com/drive/v1/files:batchMove", http.MethodPost, func(req *resty.Request) { + _, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchMove", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "ids": []string{srcObj.GetID()}, "to": base.Json{ @@ -223,7 +223,7 @@ func (d *PikPak) Move(ctx context.Context, srcObj, dstDir model.Obj) error { } func (d *PikPak) Rename(ctx context.Context, srcObj model.Obj, newName string) error { - _, err := d.request("https://api-drive.mypikpak.com/drive/v1/files/"+srcObj.GetID(), http.MethodPatch, func(req *resty.Request) { + _, err := d.request("https://api-drive.mypikpak.net/drive/v1/files/"+srcObj.GetID(), http.MethodPatch, func(req *resty.Request) { req.SetBody(base.Json{ "name": newName, }) @@ -232,7 +232,7 @@ func (d *PikPak) Rename(ctx context.Context, srcObj model.Obj, newName string) e } func (d *PikPak) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { - _, err := d.request("https://api-drive.mypikpak.com/drive/v1/files:batchCopy", http.MethodPost, func(req *resty.Request) { + _, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchCopy", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "ids": []string{srcObj.GetID()}, "to": base.Json{ @@ -244,7 +244,7 @@ func (d *PikPak) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { } func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error { - _, err := d.request("https://api-drive.mypikpak.com/drive/v1/files:batchTrash", http.MethodPost, func(req 
*resty.Request) { + _, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchTrash", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "ids": []string{obj.GetID()}, }) @@ -268,7 +268,7 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr } var resp UploadTaskData - res, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) { + res, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "kind": "drive#file", "name": stream.GetName(), @@ -292,9 +292,9 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr params := resp.Resumable.Params //endpoint := strings.Join(strings.Split(params.Endpoint, ".")[1:], ".") - // web 端上传 返回的endpoint 为 `mypikpak.com` | android 端上传 返回的endpoint 为 `vip-lixian-07.mypikpak.com`· + // web 端上传 返回的endpoint 为 `mypikpak.net` | android 端上传 返回的endpoint 为 `vip-lixian-07.mypikpak.net`· if d.Addition.Platform == "android" { - params.Endpoint = "mypikpak.com" + params.Endpoint = "mypikpak.net" } if stream.GetSize() <= 10*utils.MB { // 文件大小 小于10MB,改用普通模式上传 @@ -318,7 +318,7 @@ func (d *PikPak) OfflineDownload(ctx context.Context, fileUrl string, parentDir } var resp OfflineDownloadResp - _, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) { + _, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) { req.SetBody(requestBody) }, &resp) @@ -336,7 +336,7 @@ PHASE_TYPE_RUNNING, PHASE_TYPE_ERROR, PHASE_TYPE_COMPLETE, PHASE_TYPE_PENDING */ func (d *PikPak) OfflineList(ctx context.Context, nextPageToken string, phase []string) ([]OfflineTask, error) { res := make([]OfflineTask, 0) - url := "https://api-drive.mypikpak.com/drive/v1/tasks" + url := "https://api-drive.mypikpak.net/drive/v1/tasks" if len(phase) == 0 { phase = 
[]string{"PHASE_TYPE_RUNNING", "PHASE_TYPE_ERROR", "PHASE_TYPE_COMPLETE", "PHASE_TYPE_PENDING"} @@ -377,7 +377,7 @@ func (d *PikPak) OfflineList(ctx context.Context, nextPageToken string, phase [] } func (d *PikPak) DeleteOfflineTasks(ctx context.Context, taskIDs []string, deleteFiles bool) error { - url := "https://api-drive.mypikpak.com/drive/v1/tasks" + url := "https://api-drive.mypikpak.net/drive/v1/tasks" params := map[string]string{ "task_ids": strings.Join(taskIDs, ","), "delete_files": strconv.FormatBool(deleteFiles), diff --git a/drivers/pikpak/util.go b/drivers/pikpak/util.go index 1fd26020..6c5c88ad 100644 --- a/drivers/pikpak/util.go +++ b/drivers/pikpak/util.go @@ -86,51 +86,51 @@ const ( WebClientID = "YUMx5nI8ZU8Ap8pm" WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg" WebClientVersion = "2.0.0" - WebPackageName = "mypikpak.com" + WebPackageName = "mypikpak.net" WebSdkVersion = "8.0.3" PCClientID = "YvtoWO6GNHiuCl7x" PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA" PCClientVersion = "undefined" // 2.5.6.4831 - PCPackageName = "mypikpak.com" + PCPackageName = "mypikpak.net" PCSdkVersion = "8.0.3" ) var DlAddr = []string{ - "dl-a10b-0621.mypikpak.com", - "dl-a10b-0622.mypikpak.com", - "dl-a10b-0623.mypikpak.com", - "dl-a10b-0624.mypikpak.com", - "dl-a10b-0625.mypikpak.com", - "dl-a10b-0858.mypikpak.com", - "dl-a10b-0859.mypikpak.com", - "dl-a10b-0860.mypikpak.com", - "dl-a10b-0861.mypikpak.com", - "dl-a10b-0862.mypikpak.com", - "dl-a10b-0863.mypikpak.com", - "dl-a10b-0864.mypikpak.com", - "dl-a10b-0865.mypikpak.com", - "dl-a10b-0866.mypikpak.com", - "dl-a10b-0867.mypikpak.com", - "dl-a10b-0868.mypikpak.com", - "dl-a10b-0869.mypikpak.com", - "dl-a10b-0870.mypikpak.com", - "dl-a10b-0871.mypikpak.com", - "dl-a10b-0872.mypikpak.com", - "dl-a10b-0873.mypikpak.com", - "dl-a10b-0874.mypikpak.com", - "dl-a10b-0875.mypikpak.com", - "dl-a10b-0876.mypikpak.com", - "dl-a10b-0877.mypikpak.com", - "dl-a10b-0878.mypikpak.com", - "dl-a10b-0879.mypikpak.com", - 
"dl-a10b-0880.mypikpak.com", - "dl-a10b-0881.mypikpak.com", - "dl-a10b-0882.mypikpak.com", - "dl-a10b-0883.mypikpak.com", - "dl-a10b-0884.mypikpak.com", - "dl-a10b-0885.mypikpak.com", - "dl-a10b-0886.mypikpak.com", - "dl-a10b-0887.mypikpak.com", + "dl-a10b-0621.mypikpak.net", + "dl-a10b-0622.mypikpak.net", + "dl-a10b-0623.mypikpak.net", + "dl-a10b-0624.mypikpak.net", + "dl-a10b-0625.mypikpak.net", + "dl-a10b-0858.mypikpak.net", + "dl-a10b-0859.mypikpak.net", + "dl-a10b-0860.mypikpak.net", + "dl-a10b-0861.mypikpak.net", + "dl-a10b-0862.mypikpak.net", + "dl-a10b-0863.mypikpak.net", + "dl-a10b-0864.mypikpak.net", + "dl-a10b-0865.mypikpak.net", + "dl-a10b-0866.mypikpak.net", + "dl-a10b-0867.mypikpak.net", + "dl-a10b-0868.mypikpak.net", + "dl-a10b-0869.mypikpak.net", + "dl-a10b-0870.mypikpak.net", + "dl-a10b-0871.mypikpak.net", + "dl-a10b-0872.mypikpak.net", + "dl-a10b-0873.mypikpak.net", + "dl-a10b-0874.mypikpak.net", + "dl-a10b-0875.mypikpak.net", + "dl-a10b-0876.mypikpak.net", + "dl-a10b-0877.mypikpak.net", + "dl-a10b-0878.mypikpak.net", + "dl-a10b-0879.mypikpak.net", + "dl-a10b-0880.mypikpak.net", + "dl-a10b-0881.mypikpak.net", + "dl-a10b-0882.mypikpak.net", + "dl-a10b-0883.mypikpak.net", + "dl-a10b-0884.mypikpak.net", + "dl-a10b-0885.mypikpak.net", + "dl-a10b-0886.mypikpak.net", + "dl-a10b-0887.mypikpak.net", } func (d *PikPak) login() error { @@ -139,7 +139,7 @@ func (d *PikPak) login() error { return errors.New("username or password is empty") } - url := "https://user.mypikpak.com/v1/auth/signin" + url := "https://user.mypikpak.net/v1/auth/signin" // 使用 用户填写的 CaptchaToken —————— (验证后的captcha_token) if d.GetCaptchaToken() == "" { if err := d.RefreshCaptchaTokenInLogin(GetAction(http.MethodPost, url), d.Username); err != nil { @@ -169,7 +169,7 @@ func (d *PikPak) login() error { } func (d *PikPak) refreshToken(refreshToken string) error { - url := "https://user.mypikpak.com/v1/auth/token" + url := "https://user.mypikpak.net/v1/auth/token" var e ErrResp res, err := 
base.RestyClient.SetRetryCount(1).R().SetError(&e). SetHeader("user-agent", "").SetBody(base.Json{ @@ -307,7 +307,7 @@ func (d *PikPak) getFiles(id string) ([]File, error) { "page_token": pageToken, } var resp Files - _, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodGet, func(req *resty.Request) { + _, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodGet, func(req *resty.Request) { req.SetQueryParams(query) }, &resp) if err != nil { @@ -473,7 +473,7 @@ func (d *PikPak) refreshCaptchaToken(action string, metas map[string]string) err } var e ErrResp var resp CaptchaTokenResponse - _, err := d.request("https://user.mypikpak.com/v1/shield/captcha/init", http.MethodPost, func(req *resty.Request) { + _, err := d.request("https://user.mypikpak.net/v1/shield/captcha/init", http.MethodPost, func(req *resty.Request) { req.SetError(&e).SetBody(param).SetQueryParam("client_id", d.ClientID) }, &resp) diff --git a/drivers/pikpak_share/driver.go b/drivers/pikpak_share/driver.go index 91cb45ca..f107ac17 100644 --- a/drivers/pikpak_share/driver.go +++ b/drivers/pikpak_share/driver.go @@ -80,7 +80,7 @@ func (d *PikPakShare) Init(ctx context.Context) error { } // 获取CaptchaToken - err := d.RefreshCaptchaToken(GetAction(http.MethodGet, "https://api-drive.mypikpak.com/drive/v1/share:batch_file_info"), "") + err := d.RefreshCaptchaToken(GetAction(http.MethodGet, "https://api-drive.mypikpak.net/drive/v1/share:batch_file_info"), "") if err != nil { return err } @@ -113,7 +113,7 @@ func (d *PikPakShare) Link(ctx context.Context, file model.Obj, args model.LinkA "file_id": file.GetID(), "pass_code_token": d.PassCodeToken, } - _, err := d.request("https://api-drive.mypikpak.com/drive/v1/share/file_info", http.MethodGet, func(req *resty.Request) { + _, err := d.request("https://api-drive.mypikpak.net/drive/v1/share/file_info", http.MethodGet, func(req *resty.Request) { req.SetQueryParams(query) }, &resp) if err != nil { diff --git 
a/drivers/pikpak_share/util.go b/drivers/pikpak_share/util.go index f333ca5f..1b14a65a 100644 --- a/drivers/pikpak_share/util.go +++ b/drivers/pikpak_share/util.go @@ -68,51 +68,51 @@ const ( WebClientID = "YUMx5nI8ZU8Ap8pm" WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg" WebClientVersion = "2.0.0" - WebPackageName = "mypikpak.com" + WebPackageName = "mypikpak.net" WebSdkVersion = "8.0.3" PCClientID = "YvtoWO6GNHiuCl7x" PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA" PCClientVersion = "undefined" // 2.5.6.4831 - PCPackageName = "mypikpak.com" + PCPackageName = "mypikpak.net" PCSdkVersion = "8.0.3" ) var DlAddr = []string{ - "dl-a10b-0621.mypikpak.com", - "dl-a10b-0622.mypikpak.com", - "dl-a10b-0623.mypikpak.com", - "dl-a10b-0624.mypikpak.com", - "dl-a10b-0625.mypikpak.com", - "dl-a10b-0858.mypikpak.com", - "dl-a10b-0859.mypikpak.com", - "dl-a10b-0860.mypikpak.com", - "dl-a10b-0861.mypikpak.com", - "dl-a10b-0862.mypikpak.com", - "dl-a10b-0863.mypikpak.com", - "dl-a10b-0864.mypikpak.com", - "dl-a10b-0865.mypikpak.com", - "dl-a10b-0866.mypikpak.com", - "dl-a10b-0867.mypikpak.com", - "dl-a10b-0868.mypikpak.com", - "dl-a10b-0869.mypikpak.com", - "dl-a10b-0870.mypikpak.com", - "dl-a10b-0871.mypikpak.com", - "dl-a10b-0872.mypikpak.com", - "dl-a10b-0873.mypikpak.com", - "dl-a10b-0874.mypikpak.com", - "dl-a10b-0875.mypikpak.com", - "dl-a10b-0876.mypikpak.com", - "dl-a10b-0877.mypikpak.com", - "dl-a10b-0878.mypikpak.com", - "dl-a10b-0879.mypikpak.com", - "dl-a10b-0880.mypikpak.com", - "dl-a10b-0881.mypikpak.com", - "dl-a10b-0882.mypikpak.com", - "dl-a10b-0883.mypikpak.com", - "dl-a10b-0884.mypikpak.com", - "dl-a10b-0885.mypikpak.com", - "dl-a10b-0886.mypikpak.com", - "dl-a10b-0887.mypikpak.com", + "dl-a10b-0621.mypikpak.net", + "dl-a10b-0622.mypikpak.net", + "dl-a10b-0623.mypikpak.net", + "dl-a10b-0624.mypikpak.net", + "dl-a10b-0625.mypikpak.net", + "dl-a10b-0858.mypikpak.net", + "dl-a10b-0859.mypikpak.net", + "dl-a10b-0860.mypikpak.net", + "dl-a10b-0861.mypikpak.net", + 
"dl-a10b-0862.mypikpak.net", + "dl-a10b-0863.mypikpak.net", + "dl-a10b-0864.mypikpak.net", + "dl-a10b-0865.mypikpak.net", + "dl-a10b-0866.mypikpak.net", + "dl-a10b-0867.mypikpak.net", + "dl-a10b-0868.mypikpak.net", + "dl-a10b-0869.mypikpak.net", + "dl-a10b-0870.mypikpak.net", + "dl-a10b-0871.mypikpak.net", + "dl-a10b-0872.mypikpak.net", + "dl-a10b-0873.mypikpak.net", + "dl-a10b-0874.mypikpak.net", + "dl-a10b-0875.mypikpak.net", + "dl-a10b-0876.mypikpak.net", + "dl-a10b-0877.mypikpak.net", + "dl-a10b-0878.mypikpak.net", + "dl-a10b-0879.mypikpak.net", + "dl-a10b-0880.mypikpak.net", + "dl-a10b-0881.mypikpak.net", + "dl-a10b-0882.mypikpak.net", + "dl-a10b-0883.mypikpak.net", + "dl-a10b-0884.mypikpak.net", + "dl-a10b-0885.mypikpak.net", + "dl-a10b-0886.mypikpak.net", + "dl-a10b-0887.mypikpak.net", } func (d *PikPakShare) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { @@ -159,7 +159,7 @@ func (d *PikPakShare) getSharePassToken() error { "limit": "100", } var resp ShareResp - _, err := d.request("https://api-drive.mypikpak.com/drive/v1/share", http.MethodGet, func(req *resty.Request) { + _, err := d.request("https://api-drive.mypikpak.net/drive/v1/share", http.MethodGet, func(req *resty.Request) { req.SetQueryParams(query) }, &resp) if err != nil { @@ -187,7 +187,7 @@ func (d *PikPakShare) getFiles(id string) ([]File, error) { "pass_code_token": d.PassCodeToken, } var resp ShareResp - _, err := d.request("https://api-drive.mypikpak.com/drive/v1/share/detail", http.MethodGet, func(req *resty.Request) { + _, err := d.request("https://api-drive.mypikpak.net/drive/v1/share/detail", http.MethodGet, func(req *resty.Request) { req.SetQueryParams(query) }, &resp) if err != nil { @@ -345,7 +345,7 @@ func (d *PikPakShare) refreshCaptchaToken(action string, metas map[string]string } var e ErrResp var resp CaptchaTokenResponse - _, err := d.request("https://user.mypikpak.com/v1/shield/captcha/init", http.MethodPost, func(req 
*resty.Request) { + _, err := d.request("https://user.mypikpak.net/v1/shield/captcha/init", http.MethodPost, func(req *resty.Request) { req.SetError(&e).SetBody(param) }, &resp) From 28d2367a87cfd76de59a24616d68128d3a9cc14a Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Sun, 17 Nov 2024 22:24:06 +0800 Subject: [PATCH 027/187] fix(ci): no space left on device --- .github/workflows/release.yml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6ef38566..beed6fcd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,6 +13,23 @@ jobs: name: Release runs-on: ${{ matrix.platform }} steps: + + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + # this might remove tools that are actually needed, + # if set to "true" but frees about 6 GB + tool-cache: false + + # all of these default to true, but feel free to set to + # "false" if necessary for your workflow + android: true + dotnet: true + haskell: true + large-packages: true + docker-images: true + swap-storage: true + - name: Prerelease uses: irongut/EditRelease@v1.2.0 with: @@ -32,7 +49,6 @@ jobs: - name: Install dependencies run: | - sudo snap install zig --classic --beta docker pull crazymax/xgo:latest go install github.com/crazy-max/xgo@latest sudo apt install upx From 0ba754fd406041d9a7e1bf3ce0e9949914239a49 Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Sun, 17 Nov 2024 23:11:03 +0800 Subject: [PATCH 028/187] fix(release): missing installation of zig --- .github/workflows/release.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index beed6fcd..1d42019a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -49,6 +49,7 @@ jobs: - name: Install dependencies run: | + sudo snap install zig --classic --beta docker pull crazymax/xgo:latest go install 
github.com/crazy-max/xgo@latest sudo apt install upx From 150dcc21474a05ecd61550b35c58821dec96bb73 Mon Sep 17 00:00:00 2001 From: Mmx Date: Thu, 21 Nov 2024 22:36:41 +0800 Subject: [PATCH 029/187] fix(sso): OIDC compatibility mode (#7524) --- server/handles/ssologin.go | 61 +++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 33 deletions(-) diff --git a/server/handles/ssologin.go b/server/handles/ssologin.go index 70298a9c..1acd0476 100644 --- a/server/handles/ssologin.go +++ b/server/handles/ssologin.go @@ -36,14 +36,21 @@ var opts = totp.ValidateOpts{ Algorithm: otp.AlgorithmSHA1, } +func ssoRedirectUri(c *gin.Context, useCompatibility bool, method string) string { + if useCompatibility { + return common.GetApiUrl(c.Request) + "/api/auth/" + method + } else { + return common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + method + } +} + func SSOLoginRedirect(c *gin.Context) { method := c.Query("method") - usecompatibility := setting.GetBool(conf.SSOCompatibilityMode) + useCompatibility := setting.GetBool(conf.SSOCompatibilityMode) enabled := setting.GetBool(conf.SSOLoginEnabled) clientId := setting.GetStr(conf.SSOClientId) platform := setting.GetStr(conf.SSOLoginPlatform) - var r_url string - var redirect_uri string + var rUrl string if !enabled { common.ErrorStrResp(c, "Single sign-on is not enabled", 403) return @@ -53,37 +60,33 @@ func SSOLoginRedirect(c *gin.Context) { common.ErrorStrResp(c, "no method provided", 400) return } - if usecompatibility { - redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/" + method - } else { - redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + method - } + redirectUri := ssoRedirectUri(c, useCompatibility, method) urlValues.Add("response_type", "code") - urlValues.Add("redirect_uri", redirect_uri) + urlValues.Add("redirect_uri", redirectUri) urlValues.Add("client_id", clientId) switch platform { case "Github": - r_url = 
"https://github.com/login/oauth/authorize?" + rUrl = "https://github.com/login/oauth/authorize?" urlValues.Add("scope", "read:user") case "Microsoft": - r_url = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize?" + rUrl = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize?" urlValues.Add("scope", "user.read") urlValues.Add("response_mode", "query") case "Google": - r_url = "https://accounts.google.com/o/oauth2/v2/auth?" + rUrl = "https://accounts.google.com/o/oauth2/v2/auth?" urlValues.Add("scope", "https://www.googleapis.com/auth/userinfo.profile") case "Dingtalk": - r_url = "https://login.dingtalk.com/oauth2/auth?" + rUrl = "https://login.dingtalk.com/oauth2/auth?" urlValues.Add("scope", "openid") urlValues.Add("prompt", "consent") urlValues.Add("response_type", "code") case "Casdoor": endpoint := strings.TrimSuffix(setting.GetStr(conf.SSOEndpointName), "/") - r_url = endpoint + "/login/oauth/authorize?" + rUrl = endpoint + "/login/oauth/authorize?" urlValues.Add("scope", "profile") urlValues.Add("state", endpoint) case "OIDC": - oauth2Config, err := GetOIDCClient(c) + oauth2Config, err := GetOIDCClient(c, useCompatibility, redirectUri, method) if err != nil { common.ErrorStrResp(c, err.Error(), 400) return @@ -100,22 +103,14 @@ func SSOLoginRedirect(c *gin.Context) { common.ErrorStrResp(c, "invalid platform", 400) return } - c.Redirect(302, r_url+urlValues.Encode()) + c.Redirect(302, rUrl+urlValues.Encode()) } var ssoClient = resty.New().SetRetryCount(3) -func GetOIDCClient(c *gin.Context) (*oauth2.Config, error) { - var redirect_uri string - usecompatibility := setting.GetBool(conf.SSOCompatibilityMode) - argument := c.Query("method") - if usecompatibility { - argument = path.Base(c.Request.URL.Path) - } - if usecompatibility { - redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/" + argument - } else { - redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + argument +func GetOIDCClient(c 
*gin.Context, useCompatibility bool, redirectUri, method string) (*oauth2.Config, error) { + if redirectUri == "" { + redirectUri = ssoRedirectUri(c, useCompatibility, method) } endpoint := setting.GetStr(conf.SSOEndpointName) provider, err := oidc.NewProvider(c, endpoint) @@ -127,7 +122,7 @@ func GetOIDCClient(c *gin.Context) (*oauth2.Config, error) { return &oauth2.Config{ ClientID: clientId, ClientSecret: clientSecret, - RedirectURL: redirect_uri, + RedirectURL: redirectUri, // Discovery returns the OAuth2 endpoints. Endpoint: provider.Endpoint(), @@ -181,9 +176,9 @@ func parseJWT(p string) ([]byte, error) { func OIDCLoginCallback(c *gin.Context) { useCompatibility := setting.GetBool(conf.SSOCompatibilityMode) - argument := c.Query("method") + method := c.Query("method") if useCompatibility { - argument = path.Base(c.Request.URL.Path) + method = path.Base(c.Request.URL.Path) } clientId := setting.GetStr(conf.SSOClientId) endpoint := setting.GetStr(conf.SSOEndpointName) @@ -192,7 +187,7 @@ func OIDCLoginCallback(c *gin.Context) { common.ErrorResp(c, err, 400) return } - oauth2Config, err := GetOIDCClient(c) + oauth2Config, err := GetOIDCClient(c, useCompatibility, "", method) if err != nil { common.ErrorResp(c, err, 400) return @@ -236,7 +231,7 @@ func OIDCLoginCallback(c *gin.Context) { common.ErrorStrResp(c, "cannot get username from OIDC provider", 400) return } - if argument == "get_sso_id" { + if method == "get_sso_id" { if useCompatibility { c.Redirect(302, common.GetApiUrl(c.Request)+"/@manage?sso_id="+userID) return @@ -252,7 +247,7 @@ func OIDCLoginCallback(c *gin.Context) { c.Data(200, "text/html; charset=utf-8", []byte(html)) return } - if argument == "sso_get_token" { + if method == "sso_get_token" { user, err := db.GetUserBySSOID(userID) if err != nil { user, err = autoRegister(userID, userID, err) From 12b429584ecceaf88af3465fc3d5bc580187e6ae Mon Sep 17 00:00:00 2001 From: Mmx Date: Thu, 21 Nov 2024 22:37:19 +0800 Subject: [PATCH 030/187] 
feat(security): generating random string with crypto rand (#7525) --- pkg/utils/random/random.go | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/pkg/utils/random/random.go b/pkg/utils/random/random.go index 65fbf14a..c3f3dd48 100644 --- a/pkg/utils/random/random.go +++ b/pkg/utils/random/random.go @@ -1,20 +1,27 @@ package random import ( - "math/rand" + "crypto/rand" + "math/big" + mathRand "math/rand" "time" "github.com/google/uuid" ) -var Rand *rand.Rand +var Rand *mathRand.Rand const letterBytes = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" func String(n int) string { b := make([]byte, n) + letterLen := big.NewInt(int64(len(letterBytes))) for i := range b { - b[i] = letterBytes[Rand.Intn(len(letterBytes))] + idx, err := rand.Int(rand.Reader, letterLen) + if err != nil { + panic(err) + } + b[i] = letterBytes[idx.Int64()] } return string(b) } @@ -24,10 +31,10 @@ func Token() string { } func RangeInt64(left, right int64) int64 { - return rand.Int63n(left+right) - left + return mathRand.Int63n(left+right) - left } func init() { - s := rand.NewSource(time.Now().UnixNano()) - Rand = rand.New(s) + s := mathRand.NewSource(time.Now().UnixNano()) + Rand = mathRand.New(s) } From 398c04386ae537d0cb6e79a360c0b155583bf146 Mon Sep 17 00:00:00 2001 From: Mmx Date: Thu, 21 Nov 2024 22:38:04 +0800 Subject: [PATCH 031/187] feat(sso): generate and verify OAuth state with go-cache (#7527) --- server/handles/ssologin.go | 44 ++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/server/handles/ssologin.go b/server/handles/ssologin.go index 1acd0476..cb5fc4ca 100644 --- a/server/handles/ssologin.go +++ b/server/handles/ssologin.go @@ -1,10 +1,10 @@ package handles import ( - "encoding/base32" "encoding/base64" "errors" "fmt" + "github.com/Xhofe/go-cache" "net/http" "net/url" "path" @@ -21,19 +21,28 @@ import ( "github.com/coreos/go-oidc" "github.com/gin-gonic/gin" 
"github.com/go-resty/resty/v2" - "github.com/pquerna/otp" - "github.com/pquerna/otp/totp" "golang.org/x/oauth2" "gorm.io/gorm" ) -var opts = totp.ValidateOpts{ - // state verify won't expire in 30 secs, which is quite enough for the callback - Period: 30, - Skew: 1, - // in some OIDC providers(such as Authelia), state parameter must be at least 8 characters - Digits: otp.DigitsEight, - Algorithm: otp.AlgorithmSHA1, +const stateLength = 16 +const stateExpire = time.Minute * 5 + +var stateCache = cache.NewMemCache[string](cache.WithShards[string](stateLength)) + +func _keyState(clientID, state string) string { + return fmt.Sprintf("%s_%s", clientID, state) +} + +func generateState(clientID, ip string) string { + state := random.String(stateLength) + stateCache.Set(_keyState(clientID, state), ip, cache.WithEx[string](stateExpire)) + return state +} + +func verifyState(clientID, ip, state string) bool { + value, ok := stateCache.Get(_keyState(clientID, state)) + return ok && value == ip } func ssoRedirectUri(c *gin.Context, useCompatibility bool, method string) string { @@ -91,12 +100,7 @@ func SSOLoginRedirect(c *gin.Context) { common.ErrorStrResp(c, err.Error(), 400) return } - // generate state parameter - state, err := totp.GenerateCodeCustom(base32.StdEncoding.EncodeToString([]byte(oauth2Config.ClientSecret)), time.Now(), opts) - if err != nil { - common.ErrorStrResp(c, err.Error(), 400) - return - } + state := generateState(clientId, c.ClientIP()) c.Redirect(http.StatusFound, oauth2Config.AuthCodeURL(state)) return default: @@ -192,13 +196,7 @@ func OIDCLoginCallback(c *gin.Context) { common.ErrorResp(c, err, 400) return } - // add state verify process - stateVerification, err := totp.ValidateCustom(c.Query("state"), base32.StdEncoding.EncodeToString([]byte(oauth2Config.ClientSecret)), time.Now(), opts) - if err != nil { - common.ErrorResp(c, err, 400) - return - } - if !stateVerification { + if !verifyState(clientId, c.ClientIP(), c.Query("state")) { 
common.ErrorStrResp(c, "incorrect or expired state parameter", 400) return } From 25c5e075a977f24a35489cb72b665cc440c96255 Mon Sep 17 00:00:00 2001 From: Rirmach Date: Thu, 21 Nov 2024 22:38:41 +0800 Subject: [PATCH 032/187] fix(local): Preserve file owner when copying (#7528) --- drivers/local/driver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/local/driver.go b/drivers/local/driver.go index c39cec10..229c8692 100644 --- a/drivers/local/driver.go +++ b/drivers/local/driver.go @@ -280,7 +280,7 @@ func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error { return cp.Copy(srcPath, dstPath, cp.Options{ Sync: true, // Sync file to disk after copy, may have performance penalty in filesystem such as ZFS PreserveTimes: true, - NumOfWorkers: 0, // Serialized copy without using goroutine + PreserveOwner: true, }) } From 4c0cffd29b7d8b078388420c0211da31ec47a577 Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Thu, 21 Nov 2024 22:39:14 +0800 Subject: [PATCH 033/187] fix(net): close of closed channel (#7529) --- internal/net/request.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/internal/net/request.go b/internal/net/request.go index 088ff66a..c0f547ba 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "github.com/alist-org/alist/v3/pkg/utils" "io" "math" "net/http" @@ -13,6 +12,8 @@ import ( "sync" "time" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/pkg/http_range" "github.com/aws/aws-sdk-go/aws/awsutil" log "github.com/sirupsen/logrus" @@ -168,6 +169,9 @@ func (d *downloader) sendChunkTask() *chunk { // when the final reader Close, we interrupt func (d *downloader) interrupt() error { + if d.chunkChannel == nil { + return nil + } d.cancel() if d.written != d.params.Range.Length { log.Debugf("Downloader interrupt before finish") @@ -177,6 +181,7 @@ func (d 
*downloader) interrupt() error { } defer func() { close(d.chunkChannel) + d.chunkChannel = nil for _, buf := range d.bufs { buf.Close() } From 2dec756f232e2cd3be5c3bf4df18658dc605a240 Mon Sep 17 00:00:00 2001 From: YangXu <47767754+Three-taile-dragon@users.noreply.github.com> Date: Thu, 21 Nov 2024 22:40:39 +0800 Subject: [PATCH 034/187] fix(pikpak&pikpak_share): captcha_sign error (#7530 close #7481 close #7482) --- drivers/pikpak/driver.go | 16 ---- drivers/pikpak/meta.go | 18 ++--- drivers/pikpak/util.go | 138 +++++++------------------------- drivers/pikpak_share/driver.go | 16 ---- drivers/pikpak_share/meta.go | 12 ++- drivers/pikpak_share/util.go | 139 +++++++-------------------------- 6 files changed, 71 insertions(+), 268 deletions(-) diff --git a/drivers/pikpak/driver.go b/drivers/pikpak/driver.go index 24de24d4..5640d765 100644 --- a/drivers/pikpak/driver.go +++ b/drivers/pikpak/driver.go @@ -14,7 +14,6 @@ import ( log "github.com/sirupsen/logrus" "golang.org/x/oauth2" "net/http" - "regexp" "strconv" "strings" ) @@ -49,7 +48,6 @@ func (d *PikPak) Init(ctx context.Context) (err error) { d.Common.CaptchaToken = token op.MustSaveDriverStorage(d) }, - LowLatencyAddr: "", } } @@ -138,14 +136,6 @@ func (d *PikPak) Init(ctx context.Context) (err error) { d.Addition.RefreshToken = d.RefreshToken op.MustSaveDriverStorage(d) - if d.UseLowLatencyAddress && d.Addition.CustomLowLatencyAddress != "" { - d.Common.LowLatencyAddr = d.Addition.CustomLowLatencyAddress - } else if d.UseLowLatencyAddress { - d.Common.LowLatencyAddr = findLowestLatencyAddress(DlAddr) - d.Addition.CustomLowLatencyAddress = d.Common.LowLatencyAddr - op.MustSaveDriverStorage(d) - } - return nil } @@ -188,12 +178,6 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs) url = resp.Medias[0].Link.Url } - if d.UseLowLatencyAddress && d.Common.LowLatencyAddr != "" { - // 替换为加速链接 - re := regexp.MustCompile(`https://[^/]+/download/`) - url = re.ReplaceAllString(url, 
"https://"+d.Common.LowLatencyAddr+"/download/") - } - return &model.Link{ URL: url, }, nil diff --git a/drivers/pikpak/meta.go b/drivers/pikpak/meta.go index 4d25ecc6..7e525787 100644 --- a/drivers/pikpak/meta.go +++ b/drivers/pikpak/meta.go @@ -7,16 +7,14 @@ import ( type Addition struct { driver.RootID - Username string `json:"username" required:"true"` - Password string `json:"password" required:"true"` - Platform string `json:"platform" required:"true" type:"select" options:"android,web,pc"` - RefreshToken string `json:"refresh_token" required:"true" default:""` - RefreshTokenMethod string `json:"refresh_token_method" required:"true" type:"select" options:"oauth2,http"` - CaptchaToken string `json:"captcha_token" default:""` - DeviceID string `json:"device_id" required:"false" default:""` - DisableMediaLink bool `json:"disable_media_link" default:"true"` - UseLowLatencyAddress bool `json:"use_low_latency_address" default:"false"` - CustomLowLatencyAddress string `json:"custom_low_latency_address" default:""` + Username string `json:"username" required:"true"` + Password string `json:"password" required:"true"` + Platform string `json:"platform" required:"true" default:"web" type:"select" options:"android,web,pc"` + RefreshToken string `json:"refresh_token" required:"true" default:""` + RefreshTokenMethod string `json:"refresh_token_method" required:"true" type:"select" options:"oauth2,http"` + CaptchaToken string `json:"captcha_token" default:""` + DeviceID string `json:"device_id" required:"false" default:""` + DisableMediaLink bool `json:"disable_media_link" default:"true"` } var config = driver.Config{ diff --git a/drivers/pikpak/util.go b/drivers/pikpak/util.go index 6c5c88ad..67077fb8 100644 --- a/drivers/pikpak/util.go +++ b/drivers/pikpak/util.go @@ -30,32 +30,34 @@ import ( // do others that not defined in Driver interface var AndroidAlgorithms = []string{ - "aDhgaSE3MsjROCmpmsWqP1sJdFJ", - "+oaVkqdd8MJuKT+uMr2AYKcd9tdWge3XPEPR2hcePUknd", - 
"u/sd2GgT2fTytRcKzGicHodhvIltMntA3xKw2SRv7S48OdnaQIS5mn", - "2WZiae2QuqTOxBKaaqCNHCW3olu2UImelkDzBn", - "/vJ3upic39lgmrkX855Qx", - "yNc9ruCVMV7pGV7XvFeuLMOcy1", - "4FPq8mT3JQ1jzcVxMVfwFftLQm33M7i", - "xozoy5e3Ea", + "7xOq4Z8s", + "QE9/9+IQco", + "WdX5J9CPLZp", + "NmQ5qFAXqH3w984cYhMeC5TJR8j", + "cc44M+l7GDhav", + "KxGjo/wHB+Yx8Lf7kMP+/m9I+", + "wla81BUVSmDkctHDpUT", + "c6wMr1sm1WxiR3i8LDAm3W", + "hRLrEQCFNYi0PFPV", + "o1J41zIraDtJPNuhBu7Ifb/q3", + "U", + "RrbZvV0CTu3gaZJ56PVKki4IeP", + "NNuRbLckJqUp1Do0YlrKCUP", + "UUwnBbipMTvInA0U0E9", + "VzGc", } var WebAlgorithms = []string{ - "C9qPpZLN8ucRTaTiUMWYS9cQvWOE", - "+r6CQVxjzJV6LCV", - "F", - "pFJRC", - "9WXYIDGrwTCz2OiVlgZa90qpECPD6olt", - "/750aCr4lm/Sly/c", - "RB+DT/gZCrbV", - "", - "CyLsf7hdkIRxRm215hl", - "7xHvLi2tOYP0Y92b", - "ZGTXXxu8E/MIWaEDB+Sm/", - "1UI3", - "E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO", - "ihtqpG6FMt65+Xk+tWUH2", - "NhXXU9rg4XXdzo7u5o", + "fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr", + "uSUX02HYJ1IkyLdhINEFcCf7l2", + "iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41", + "3binT1s/5a1pu3fGsN", + "8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5", + "DYS3StqnAEKdGddRP8CJrxUSFh", + "crquW+4", + "ryKqvW9B9hly+JAymXCIfag5Z", + "Hr08T/NDTX1oSJfHk90c", + "i", } var PCAlgorithms = []string{ @@ -80,59 +82,21 @@ const ( const ( AndroidClientID = "YNxT9w7GMdWvEOKa" AndroidClientSecret = "dbw2OtmVEeuUvIptb1Coyg" - AndroidClientVersion = "1.48.3" + AndroidClientVersion = "1.49.3" AndroidPackageName = "com.pikcloud.pikpak" AndroidSdkVersion = "2.0.4.204101" WebClientID = "YUMx5nI8ZU8Ap8pm" WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg" - WebClientVersion = "2.0.0" - WebPackageName = "mypikpak.net" + WebClientVersion = "undefined" + WebPackageName = "drive.mypikpak.com" WebSdkVersion = "8.0.3" PCClientID = "YvtoWO6GNHiuCl7x" PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA" PCClientVersion = "undefined" // 2.5.6.4831 - PCPackageName = "mypikpak.net" + PCPackageName = "mypikpak.com" PCSdkVersion = "8.0.3" ) -var DlAddr = 
[]string{ - "dl-a10b-0621.mypikpak.net", - "dl-a10b-0622.mypikpak.net", - "dl-a10b-0623.mypikpak.net", - "dl-a10b-0624.mypikpak.net", - "dl-a10b-0625.mypikpak.net", - "dl-a10b-0858.mypikpak.net", - "dl-a10b-0859.mypikpak.net", - "dl-a10b-0860.mypikpak.net", - "dl-a10b-0861.mypikpak.net", - "dl-a10b-0862.mypikpak.net", - "dl-a10b-0863.mypikpak.net", - "dl-a10b-0864.mypikpak.net", - "dl-a10b-0865.mypikpak.net", - "dl-a10b-0866.mypikpak.net", - "dl-a10b-0867.mypikpak.net", - "dl-a10b-0868.mypikpak.net", - "dl-a10b-0869.mypikpak.net", - "dl-a10b-0870.mypikpak.net", - "dl-a10b-0871.mypikpak.net", - "dl-a10b-0872.mypikpak.net", - "dl-a10b-0873.mypikpak.net", - "dl-a10b-0874.mypikpak.net", - "dl-a10b-0875.mypikpak.net", - "dl-a10b-0876.mypikpak.net", - "dl-a10b-0877.mypikpak.net", - "dl-a10b-0878.mypikpak.net", - "dl-a10b-0879.mypikpak.net", - "dl-a10b-0880.mypikpak.net", - "dl-a10b-0881.mypikpak.net", - "dl-a10b-0882.mypikpak.net", - "dl-a10b-0883.mypikpak.net", - "dl-a10b-0884.mypikpak.net", - "dl-a10b-0885.mypikpak.net", - "dl-a10b-0886.mypikpak.net", - "dl-a10b-0887.mypikpak.net", -} - func (d *PikPak) login() error { // 检查用户名和密码是否为空 if d.Addition.Username == "" || d.Addition.Password == "" { @@ -338,7 +302,6 @@ type Common struct { UserAgent string // 验证码token刷新成功回调 RefreshCTokenCk func(token string) - LowLatencyAddr string } func generateDeviceSign(deviceID, packageName string) string { @@ -729,46 +692,3 @@ func OssOption(params *S3Params) []oss.Option { } return options } - -type AddressLatency struct { - Address string - Latency time.Duration -} - -func checkLatency(address string, wg *sync.WaitGroup, ch chan<- AddressLatency) { - defer wg.Done() - start := time.Now() - resp, err := http.Get("https://" + address + "/generate_204") - if err != nil { - ch <- AddressLatency{Address: address, Latency: time.Hour} // Set high latency on error - return - } - defer resp.Body.Close() - latency := time.Since(start) - ch <- AddressLatency{Address: address, Latency: latency} 
-} - -func findLowestLatencyAddress(addresses []string) string { - var wg sync.WaitGroup - ch := make(chan AddressLatency, len(addresses)) - - for _, address := range addresses { - wg.Add(1) - go checkLatency(address, &wg, ch) - } - - wg.Wait() - close(ch) - - var lowestLatencyAddress string - lowestLatency := time.Hour - - for result := range ch { - if result.Latency < lowestLatency { - lowestLatency = result.Latency - lowestLatencyAddress = result.Address - } - } - - return lowestLatencyAddress -} diff --git a/drivers/pikpak_share/driver.go b/drivers/pikpak_share/driver.go index f107ac17..d527a1ab 100644 --- a/drivers/pikpak_share/driver.go +++ b/drivers/pikpak_share/driver.go @@ -4,7 +4,6 @@ import ( "context" "github.com/alist-org/alist/v3/internal/op" "net/http" - "regexp" "time" "github.com/alist-org/alist/v3/internal/driver" @@ -37,7 +36,6 @@ func (d *PikPakShare) Init(ctx context.Context) error { d.Common.CaptchaToken = token op.MustSaveDriverStorage(d) }, - LowLatencyAddr: "", } } @@ -71,14 +69,6 @@ func (d *PikPakShare) Init(ctx context.Context) error { d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.5.6.4831 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36" } - if d.UseLowLatencyAddress && d.Addition.CustomLowLatencyAddress != "" { - d.Common.LowLatencyAddr = d.Addition.CustomLowLatencyAddress - } else if d.UseLowLatencyAddress { - d.Common.LowLatencyAddr = findLowestLatencyAddress(DlAddr) - d.Addition.CustomLowLatencyAddress = d.Common.LowLatencyAddr - op.MustSaveDriverStorage(d) - } - // 获取CaptchaToken err := d.RefreshCaptchaToken(GetAction(http.MethodGet, "https://api-drive.mypikpak.net/drive/v1/share:batch_file_info"), "") if err != nil { @@ -131,12 +121,6 @@ func (d *PikPakShare) Link(ctx context.Context, file model.Obj, args model.LinkA } - if d.UseLowLatencyAddress && d.Common.LowLatencyAddr != "" { - // 替换为加速链接 - re := regexp.MustCompile(`https://[^/]+/download/`) - downloadUrl = 
re.ReplaceAllString(downloadUrl, "https://"+d.Common.LowLatencyAddr+"/download/") - } - return &model.Link{ URL: downloadUrl, }, nil diff --git a/drivers/pikpak_share/meta.go b/drivers/pikpak_share/meta.go index dc551e02..30bccbdc 100644 --- a/drivers/pikpak_share/meta.go +++ b/drivers/pikpak_share/meta.go @@ -7,13 +7,11 @@ import ( type Addition struct { driver.RootID - ShareId string `json:"share_id" required:"true"` - SharePwd string `json:"share_pwd"` - Platform string `json:"platform" required:"true" type:"select" options:"android,web,pc"` - DeviceID string `json:"device_id" required:"false" default:""` - UseTransCodingAddress bool `json:"use_transcoding_address" required:"true" default:"false"` - UseLowLatencyAddress bool `json:"use_low_latency_address" default:"false"` - CustomLowLatencyAddress string `json:"custom_low_latency_address" default:""` + ShareId string `json:"share_id" required:"true"` + SharePwd string `json:"share_pwd"` + Platform string `json:"platform" default:"web" required:"true" type:"select" options:"android,web,pc"` + DeviceID string `json:"device_id" required:"false" default:""` + UseTransCodingAddress bool `json:"use_transcoding_address" required:"true" default:"false"` } var config = driver.Config{ diff --git a/drivers/pikpak_share/util.go b/drivers/pikpak_share/util.go index 1b14a65a..172a6148 100644 --- a/drivers/pikpak_share/util.go +++ b/drivers/pikpak_share/util.go @@ -10,7 +10,6 @@ import ( "net/http" "regexp" "strings" - "sync" "time" "github.com/alist-org/alist/v3/drivers/base" @@ -18,32 +17,34 @@ import ( ) var AndroidAlgorithms = []string{ - "aDhgaSE3MsjROCmpmsWqP1sJdFJ", - "+oaVkqdd8MJuKT+uMr2AYKcd9tdWge3XPEPR2hcePUknd", - "u/sd2GgT2fTytRcKzGicHodhvIltMntA3xKw2SRv7S48OdnaQIS5mn", - "2WZiae2QuqTOxBKaaqCNHCW3olu2UImelkDzBn", - "/vJ3upic39lgmrkX855Qx", - "yNc9ruCVMV7pGV7XvFeuLMOcy1", - "4FPq8mT3JQ1jzcVxMVfwFftLQm33M7i", - "xozoy5e3Ea", + "7xOq4Z8s", + "QE9/9+IQco", + "WdX5J9CPLZp", + "NmQ5qFAXqH3w984cYhMeC5TJR8j", + 
"cc44M+l7GDhav", + "KxGjo/wHB+Yx8Lf7kMP+/m9I+", + "wla81BUVSmDkctHDpUT", + "c6wMr1sm1WxiR3i8LDAm3W", + "hRLrEQCFNYi0PFPV", + "o1J41zIraDtJPNuhBu7Ifb/q3", + "U", + "RrbZvV0CTu3gaZJ56PVKki4IeP", + "NNuRbLckJqUp1Do0YlrKCUP", + "UUwnBbipMTvInA0U0E9", + "VzGc", } var WebAlgorithms = []string{ - "C9qPpZLN8ucRTaTiUMWYS9cQvWOE", - "+r6CQVxjzJV6LCV", - "F", - "pFJRC", - "9WXYIDGrwTCz2OiVlgZa90qpECPD6olt", - "/750aCr4lm/Sly/c", - "RB+DT/gZCrbV", - "", - "CyLsf7hdkIRxRm215hl", - "7xHvLi2tOYP0Y92b", - "ZGTXXxu8E/MIWaEDB+Sm/", - "1UI3", - "E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO", - "ihtqpG6FMt65+Xk+tWUH2", - "NhXXU9rg4XXdzo7u5o", + "fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr", + "uSUX02HYJ1IkyLdhINEFcCf7l2", + "iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41", + "3binT1s/5a1pu3fGsN", + "8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5", + "DYS3StqnAEKdGddRP8CJrxUSFh", + "crquW+4", + "ryKqvW9B9hly+JAymXCIfag5Z", + "Hr08T/NDTX1oSJfHk90c", + "i", } var PCAlgorithms = []string{ @@ -62,59 +63,21 @@ var PCAlgorithms = []string{ const ( AndroidClientID = "YNxT9w7GMdWvEOKa" AndroidClientSecret = "dbw2OtmVEeuUvIptb1Coyg" - AndroidClientVersion = "1.48.3" + AndroidClientVersion = "1.49.3" AndroidPackageName = "com.pikcloud.pikpak" AndroidSdkVersion = "2.0.4.204101" WebClientID = "YUMx5nI8ZU8Ap8pm" WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg" - WebClientVersion = "2.0.0" - WebPackageName = "mypikpak.net" + WebClientVersion = "undefined" + WebPackageName = "drive.mypikpak.com" WebSdkVersion = "8.0.3" PCClientID = "YvtoWO6GNHiuCl7x" PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA" PCClientVersion = "undefined" // 2.5.6.4831 - PCPackageName = "mypikpak.net" + PCPackageName = "mypikpak.com" PCSdkVersion = "8.0.3" ) -var DlAddr = []string{ - "dl-a10b-0621.mypikpak.net", - "dl-a10b-0622.mypikpak.net", - "dl-a10b-0623.mypikpak.net", - "dl-a10b-0624.mypikpak.net", - "dl-a10b-0625.mypikpak.net", - "dl-a10b-0858.mypikpak.net", - "dl-a10b-0859.mypikpak.net", - "dl-a10b-0860.mypikpak.net", - 
"dl-a10b-0861.mypikpak.net", - "dl-a10b-0862.mypikpak.net", - "dl-a10b-0863.mypikpak.net", - "dl-a10b-0864.mypikpak.net", - "dl-a10b-0865.mypikpak.net", - "dl-a10b-0866.mypikpak.net", - "dl-a10b-0867.mypikpak.net", - "dl-a10b-0868.mypikpak.net", - "dl-a10b-0869.mypikpak.net", - "dl-a10b-0870.mypikpak.net", - "dl-a10b-0871.mypikpak.net", - "dl-a10b-0872.mypikpak.net", - "dl-a10b-0873.mypikpak.net", - "dl-a10b-0874.mypikpak.net", - "dl-a10b-0875.mypikpak.net", - "dl-a10b-0876.mypikpak.net", - "dl-a10b-0877.mypikpak.net", - "dl-a10b-0878.mypikpak.net", - "dl-a10b-0879.mypikpak.net", - "dl-a10b-0880.mypikpak.net", - "dl-a10b-0881.mypikpak.net", - "dl-a10b-0882.mypikpak.net", - "dl-a10b-0883.mypikpak.net", - "dl-a10b-0884.mypikpak.net", - "dl-a10b-0885.mypikpak.net", - "dl-a10b-0886.mypikpak.net", - "dl-a10b-0887.mypikpak.net", -} - func (d *PikPakShare) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { req := base.RestyClient.R() req.SetHeaders(map[string]string{ @@ -227,7 +190,6 @@ type Common struct { UserAgent string // 验证码token刷新成功回调 RefreshCTokenCk func(token string) - LowLatencyAddr string } func (c *Common) SetUserAgent(userAgent string) { @@ -367,46 +329,3 @@ func (d *PikPakShare) refreshCaptchaToken(action string, metas map[string]string d.Common.SetCaptchaToken(resp.CaptchaToken) return nil } - -type AddressLatency struct { - Address string - Latency time.Duration -} - -func checkLatency(address string, wg *sync.WaitGroup, ch chan<- AddressLatency) { - defer wg.Done() - start := time.Now() - resp, err := http.Get("https://" + address + "/generate_204") - if err != nil { - ch <- AddressLatency{Address: address, Latency: time.Hour} // Set high latency on error - return - } - defer resp.Body.Close() - latency := time.Since(start) - ch <- AddressLatency{Address: address, Latency: latency} -} - -func findLowestLatencyAddress(addresses []string) string { - var wg sync.WaitGroup - ch := make(chan AddressLatency, 
len(addresses)) - - for _, address := range addresses { - wg.Add(1) - go checkLatency(address, &wg, ch) - } - - wg.Wait() - close(ch) - - var lowestLatencyAddress string - lowestLatency := time.Hour - - for result := range ch { - if result.Latency < lowestLatency { - lowestLatency = result.Latency - lowestLatencyAddress = result.Address - } - } - - return lowestLatencyAddress -} From 94915b214838714431a6735ebd4632eeec28568d Mon Sep 17 00:00:00 2001 From: Kuingsmile Date: Thu, 21 Nov 2024 22:41:23 +0800 Subject: [PATCH 035/187] fix(baidu_netdisk): update fileToObj to use ServerCtime and ServerMtime (#7535) --- drivers/baidu_netdisk/types.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/baidu_netdisk/types.go b/drivers/baidu_netdisk/types.go index 6f3bf13b..728273b8 100644 --- a/drivers/baidu_netdisk/types.go +++ b/drivers/baidu_netdisk/types.go @@ -56,11 +56,11 @@ func fileToObj(f File) *model.ObjThumb { if f.ServerFilename == "" { f.ServerFilename = path.Base(f.Path) } - if f.LocalCtime == 0 { - f.LocalCtime = f.Ctime + if f.ServerCtime == 0 { + f.ServerCtime = f.Ctime } - if f.LocalMtime == 0 { - f.LocalMtime = f.Mtime + if f.ServerMtime == 0 { + f.ServerMtime = f.Mtime } return &model.ObjThumb{ Object: model.Object{ @@ -68,8 +68,8 @@ func fileToObj(f File) *model.ObjThumb { Path: f.Path, Name: f.ServerFilename, Size: f.Size, - Modified: time.Unix(f.LocalMtime, 0), - Ctime: time.Unix(f.LocalCtime, 0), + Modified: time.Unix(f.ServerMtime, 0), + Ctime: time.Unix(f.ServerCtime, 0), IsFolder: f.Isdir == 1, // 直接获取的MD5是错误的 From 492b49d77af21be06a2ec7e8259b3da3873375e3 Mon Sep 17 00:00:00 2001 From: alist666 Date: Sat, 7 Dec 2024 01:00:25 +0800 Subject: [PATCH 036/187] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index bed2eadf..701bbc2f 100644 --- a/README.md +++ b/README.md @@ -98,7 +98,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | 
[Contributing] ## Document - + ## Demo @@ -138,4 +138,4 @@ The `AList` is open-source software licensed under the AGPL-3.0 license. --- -> [@Blog](https://nn.ci/) · [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2) +> [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2) From 2d3605c6847554409a148c9ca549c10a1b618859 Mon Sep 17 00:00:00 2001 From: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Sat, 7 Dec 2024 17:02:52 +0800 Subject: [PATCH 037/187] fix(baidu_photo): cookie login fix download error (#7602) --- drivers/baidu_photo/driver.go | 31 ++++++----- drivers/baidu_photo/meta.go | 13 ++--- drivers/baidu_photo/utils.go | 99 +++++++++++++++++------------------ 3 files changed, 69 insertions(+), 74 deletions(-) diff --git a/drivers/baidu_photo/driver.go b/drivers/baidu_photo/driver.go index 94716983..d0d69e82 100644 --- a/drivers/baidu_photo/driver.go +++ b/drivers/baidu_photo/driver.go @@ -27,9 +27,9 @@ type BaiduPhoto struct { model.Storage Addition - AccessToken string - Uk int64 - root model.Obj + // AccessToken string + Uk int64 + root model.Obj uploadThread int } @@ -48,9 +48,9 @@ func (d *BaiduPhoto) Init(ctx context.Context) error { d.uploadThread, d.UploadThread = 3, "3" } - if err := d.refreshToken(); err != nil { - return err - } + // if err := d.refreshToken(); err != nil { + // return err + // } // root if d.AlbumID != "" { @@ -82,7 +82,7 @@ func (d *BaiduPhoto) GetRoot(ctx context.Context) (model.Obj, error) { } func (d *BaiduPhoto) Drop(ctx context.Context) error { - d.AccessToken = "" + // d.AccessToken = "" d.Uk = 0 d.root = nil return nil @@ -140,14 +140,13 @@ func (d *BaiduPhoto) Link(ctx context.Context, file model.Obj, args model.LinkAr // 处理共享相册 if d.Uk != file.Uk { // 有概率无法获取到链接 - return d.linkAlbum(ctx, file, args) + // return d.linkAlbum(ctx, file, args) - // 接口被限制,只能使用cookie 
- // f, err := d.CopyAlbumFile(ctx, file) - // if err != nil { - // return nil, err - // } - // return d.linkFile(ctx, f, args) + f, err := d.CopyAlbumFile(ctx, file) + if err != nil { + return nil, err + } + return d.linkFile(ctx, f, args) } return d.linkFile(ctx, &file.File, args) } @@ -292,7 +291,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil } // 尝试获取之前的进度 - precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5) + precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, strconv.FormatInt(d.Uk, 10), contentMd5) if !ok { _, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) { r.SetContext(ctx) @@ -343,7 +342,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil if err = threadG.Wait(); err != nil { if errors.Is(err, context.Canceled) { precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 }) - base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5) + base.SaveUploadProgress(d, strconv.FormatInt(d.Uk, 10), contentMd5) } return nil, err } diff --git a/drivers/baidu_photo/meta.go b/drivers/baidu_photo/meta.go index da2229f5..3bc2f622 100644 --- a/drivers/baidu_photo/meta.go +++ b/drivers/baidu_photo/meta.go @@ -6,13 +6,14 @@ import ( ) type Addition struct { - RefreshToken string `json:"refresh_token" required:"true"` - ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"` - AlbumID string `json:"album_id"` + // RefreshToken string `json:"refresh_token" required:"true"` + Cookie string `json:"cookie" required:"true"` + ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"` + AlbumID string `json:"album_id"` //AlbumPassword string `json:"album_password"` - DeleteOrigin bool `json:"delete_origin"` - ClientID string `json:"client_id" required:"true" 
default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"` - ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"` + DeleteOrigin bool `json:"delete_origin"` + // ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"` + // ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"` UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"` } diff --git a/drivers/baidu_photo/utils.go b/drivers/baidu_photo/utils.go index c8c5b7ee..0b960593 100644 --- a/drivers/baidu_photo/utils.go +++ b/drivers/baidu_photo/utils.go @@ -10,9 +10,7 @@ import ( "unicode" "github.com/alist-org/alist/v3/drivers/base" - "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" ) @@ -27,7 +25,8 @@ const ( func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) { req := client.R(). 
- SetQueryParam("access_token", d.AccessToken) + // SetQueryParam("access_token", d.AccessToken) + SetHeader("Cookie", d.Cookie) if callback != nil { callback(req) } @@ -49,10 +48,10 @@ func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, c return nil, fmt.Errorf("no shared albums found") case 50100: return nil, fmt.Errorf("illegal title, only supports 50 characters") - case -6: - if err = d.refreshToken(); err != nil { - return nil, err - } + // case -6: + // if err = d.refreshToken(); err != nil { + // return nil, err + // } default: return nil, fmt.Errorf("errno: %d, refer to https://photo.baidu.com/union/doc", erron) } @@ -67,29 +66,29 @@ func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, c // return res.Body(), nil //} -func (d *BaiduPhoto) refreshToken() error { - u := "https://openapi.baidu.com/oauth/2.0/token" - var resp base.TokenResp - var e TokenErrResp - _, err := base.RestyClient.R().SetResult(&resp).SetError(&e).SetQueryParams(map[string]string{ - "grant_type": "refresh_token", - "refresh_token": d.RefreshToken, - "client_id": d.ClientID, - "client_secret": d.ClientSecret, - }).Get(u) - if err != nil { - return err - } - if e.ErrorMsg != "" { - return &e - } - if resp.RefreshToken == "" { - return errs.EmptyToken - } - d.AccessToken, d.RefreshToken = resp.AccessToken, resp.RefreshToken - op.MustSaveDriverStorage(d) - return nil -} +// func (d *BaiduPhoto) refreshToken() error { +// u := "https://openapi.baidu.com/oauth/2.0/token" +// var resp base.TokenResp +// var e TokenErrResp +// _, err := base.RestyClient.R().SetResult(&resp).SetError(&e).SetQueryParams(map[string]string{ +// "grant_type": "refresh_token", +// "refresh_token": d.RefreshToken, +// "client_id": d.ClientID, +// "client_secret": d.ClientSecret, +// }).Get(u) +// if err != nil { +// return err +// } +// if e.ErrorMsg != "" { +// return &e +// } +// if resp.RefreshToken == "" { +// return errs.EmptyToken +// } +// d.AccessToken, 
d.RefreshToken = resp.AccessToken, resp.RefreshToken +// op.MustSaveDriverStorage(d) +// return nil +// } func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) { return d.Request(base.RestyClient, furl, http.MethodGet, callback, resp) @@ -363,10 +362,6 @@ func (d *BaiduPhoto) linkAlbum(ctx context.Context, file *AlbumFile, args model. location := resp.Header().Get("Location") - if err != nil { - return nil, err - } - link := &model.Link{ URL: location, Header: http.Header{ @@ -388,36 +383,36 @@ func (d *BaiduPhoto) linkFile(ctx context.Context, file *File, args model.LinkAr headers["X-Forwarded-For"] = args.IP } - // var downloadUrl struct { - // Dlink string `json:"dlink"` - // } - // _, err := d.Get(FILE_API_URL_V1+"/download", func(r *resty.Request) { - // r.SetContext(ctx) - // r.SetHeaders(headers) - // r.SetQueryParams(map[string]string{ - // "fsid": fmt.Sprint(file.Fsid), - // }) - // }, &downloadUrl) - - resp, err := d.Request(base.NoRedirectClient, FILE_API_URL_V1+"/download", http.MethodHead, func(r *resty.Request) { + var downloadUrl struct { + Dlink string `json:"dlink"` + } + _, err := d.Get(FILE_API_URL_V2+"/download", func(r *resty.Request) { r.SetContext(ctx) r.SetHeaders(headers) r.SetQueryParams(map[string]string{ "fsid": fmt.Sprint(file.Fsid), }) - }, nil) + }, &downloadUrl) + + // resp, err := d.Request(base.NoRedirectClient, FILE_API_URL_V1+"/download", http.MethodHead, func(r *resty.Request) { + // r.SetContext(ctx) + // r.SetHeaders(headers) + // r.SetQueryParams(map[string]string{ + // "fsid": fmt.Sprint(file.Fsid), + // }) + // }, nil) if err != nil { return nil, err } - if resp.StatusCode() != 302 { - return nil, fmt.Errorf("not found 302 redirect") - } + // if resp.StatusCode() != 302 { + // return nil, fmt.Errorf("not found 302 redirect") + // } - location := resp.Header().Get("Location") + // location := resp.Header().Get("Location") link := &model.Link{ - URL: location, + URL: 
downloadUrl.Dlink, Header: http.Header{ "User-Agent": []string{headers["User-Agent"]}, "Referer": []string{"https://photo.baidu.com/"}, From fa15c576f0fcc2ced45f55986211fda0fe4229a6 Mon Sep 17 00:00:00 2001 From: YangXu <47767754+Three-taile-dragon@users.noreply.github.com> Date: Sat, 7 Dec 2024 17:03:46 +0800 Subject: [PATCH 038/187] fix(pikpak): remove oauth2 method (#7567 close #7545) --- drivers/pikpak/driver.go | 37 +++++-------------------------- drivers/pikpak/meta.go | 15 ++++++------- drivers/pikpak/util.go | 48 +++------------------------------------- 3 files changed, 15 insertions(+), 85 deletions(-) diff --git a/drivers/pikpak/driver.go b/drivers/pikpak/driver.go index 5640d765..3db273d6 100644 --- a/drivers/pikpak/driver.go +++ b/drivers/pikpak/driver.go @@ -12,7 +12,6 @@ import ( hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash" "github.com/go-resty/resty/v2" log "github.com/sirupsen/logrus" - "golang.org/x/oauth2" "net/http" "strconv" "strings" @@ -24,7 +23,6 @@ type PikPak struct { *Common RefreshToken string AccessToken string - oauth2Token oauth2.TokenSource } func (d *PikPak) Config() driver.Config { @@ -84,41 +82,16 @@ func (d *PikPak) Init(ctx context.Context) (err error) { d.Addition.DeviceID = d.Common.DeviceID op.MustSaveDriverStorage(d) } - // 初始化 oauth2Config - oauth2Config := &oauth2.Config{ - ClientID: d.ClientID, - ClientSecret: d.ClientSecret, - Endpoint: oauth2.Endpoint{ - AuthURL: "https://user.mypikpak.net/v1/auth/signin", - TokenURL: "https://user.mypikpak.net/v1/auth/token", - AuthStyle: oauth2.AuthStyleInParams, - }, - } - // 如果已经有RefreshToken,直接获取AccessToken if d.Addition.RefreshToken != "" { - if d.RefreshTokenMethod == "oauth2" { - // 使用 oauth2 刷新令牌 - // 初始化 oauth2Token - d.initializeOAuth2Token(ctx, oauth2Config, d.Addition.RefreshToken) - if err := d.refreshTokenByOAuth2(); err != nil { - return err - } - } else { - if err := d.refreshToken(d.Addition.RefreshToken); err != nil { - return err - } - } - - } else { - 
// 如果没有填写RefreshToken,尝试登录 获取 refreshToken - if err := d.login(); err != nil { + if err = d.refreshToken(d.Addition.RefreshToken); err != nil { return err } - if d.RefreshTokenMethod == "oauth2" { - d.initializeOAuth2Token(ctx, oauth2Config, d.RefreshToken) + } else { + // 如果没有填写RefreshToken,尝试登录 获取 refreshToken + if err = d.login(); err != nil { + return err } - } // 获取CaptchaToken diff --git a/drivers/pikpak/meta.go b/drivers/pikpak/meta.go index 7e525787..5abbc879 100644 --- a/drivers/pikpak/meta.go +++ b/drivers/pikpak/meta.go @@ -7,14 +7,13 @@ import ( type Addition struct { driver.RootID - Username string `json:"username" required:"true"` - Password string `json:"password" required:"true"` - Platform string `json:"platform" required:"true" default:"web" type:"select" options:"android,web,pc"` - RefreshToken string `json:"refresh_token" required:"true" default:""` - RefreshTokenMethod string `json:"refresh_token_method" required:"true" type:"select" options:"oauth2,http"` - CaptchaToken string `json:"captcha_token" default:""` - DeviceID string `json:"device_id" required:"false" default:""` - DisableMediaLink bool `json:"disable_media_link" default:"true"` + Username string `json:"username" required:"true"` + Password string `json:"password" required:"true"` + Platform string `json:"platform" required:"true" default:"web" type:"select" options:"android,web,pc"` + RefreshToken string `json:"refresh_token" required:"true" default:""` + CaptchaToken string `json:"captcha_token" default:""` + DeviceID string `json:"device_id" required:"false" default:""` + DisableMediaLink bool `json:"disable_media_link" default:"true"` } var config = driver.Config{ diff --git a/drivers/pikpak/util.go b/drivers/pikpak/util.go index 67077fb8..e8f3c854 100644 --- a/drivers/pikpak/util.go +++ b/drivers/pikpak/util.go @@ -2,7 +2,6 @@ package pikpak import ( "bytes" - "context" "crypto/md5" "crypto/sha1" "encoding/hex" @@ -14,7 +13,6 @@ import ( 
"github.com/aliyun/aliyun-oss-go-sdk/oss" jsoniter "github.com/json-iterator/go" "github.com/pkg/errors" - "golang.org/x/oauth2" "io" "net/http" "path/filepath" @@ -27,8 +25,6 @@ import ( "github.com/go-resty/resty/v2" ) -// do others that not defined in Driver interface - var AndroidAlgorithms = []string{ "7xOq4Z8s", "QE9/9+IQco", @@ -171,30 +167,6 @@ func (d *PikPak) refreshToken(refreshToken string) error { return nil } -func (d *PikPak) initializeOAuth2Token(ctx context.Context, oauth2Config *oauth2.Config, refreshToken string) { - d.oauth2Token = oauth2.ReuseTokenSource(nil, utils.TokenSource(func() (*oauth2.Token, error) { - return oauth2Config.TokenSource(ctx, &oauth2.Token{ - RefreshToken: refreshToken, - }).Token() - })) -} - -func (d *PikPak) refreshTokenByOAuth2() error { - token, err := d.oauth2Token.Token() - if err != nil { - return err - } - d.Status = "work" - d.RefreshToken = token.RefreshToken - d.AccessToken = token.AccessToken - // 获取用户ID - userID := token.Extra("sub").(string) - d.Common.SetUserID(userID) - d.Addition.RefreshToken = d.RefreshToken - op.MustSaveDriverStorage(d) - return nil -} - func (d *PikPak) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { req := base.RestyClient.R() req.SetHeaders(map[string]string{ @@ -203,14 +175,7 @@ func (d *PikPak) request(url string, method string, callback base.ReqCallback, r "X-Device-ID": d.GetDeviceID(), "X-Captcha-Token": d.GetCaptchaToken(), }) - if d.RefreshTokenMethod == "oauth2" && d.oauth2Token != nil { - // 使用oauth2 获取 access_token - token, err := d.oauth2Token.Token() - if err != nil { - return nil, err - } - req.SetAuthScheme(token.TokenType).SetAuthToken(token.AccessToken) - } else if d.AccessToken != "" { + if d.AccessToken != "" { req.SetHeader("Authorization", "Bearer "+d.AccessToken) } @@ -232,16 +197,9 @@ func (d *PikPak) request(url string, method string, callback base.ReqCallback, r return res.Body(), nil case 4122, 4121, 16: // 
access_token 过期 - if d.RefreshTokenMethod == "oauth2" { - if err1 := d.refreshTokenByOAuth2(); err1 != nil { - return nil, err1 - } - } else { - if err1 := d.refreshToken(d.RefreshToken); err1 != nil { - return nil, err1 - } + if err1 := d.refreshToken(d.RefreshToken); err1 != nil { + return nil, err1 } - return d.request(url, method, callback, resp) case 9: // 验证码token过期 if err = d.RefreshCaptchaTokenAtLogin(GetAction(method, url), d.GetUserID()); err != nil { From 5084d98398144e6ccd9cc627ada5cb54d7dbb526 Mon Sep 17 00:00:00 2001 From: shingyu Date: Sun, 8 Dec 2024 17:06:33 +0800 Subject: [PATCH 039/187] fix(onedrive): fix timeout error (#7551 close #7506) --- drivers/onedrive/util.go | 2 +- drivers/onedrive_app/util.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/onedrive/util.go b/drivers/onedrive/util.go index 9ee2dae9..95f92db6 100644 --- a/drivers/onedrive/util.go +++ b/drivers/onedrive/util.go @@ -127,7 +127,7 @@ func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, func (d *Onedrive) getFiles(path string) ([]File, error) { var res []File - nextLink := d.GetMetaUrl(false, path) + "/children?$top=5000&$expand=thumbnails($select=medium)&$select=id,name,size,fileSystemInfo,content.downloadUrl,file,parentReference" + nextLink := d.GetMetaUrl(false, path) + "/children?$top=1000&$expand=thumbnails($select=medium)&$select=id,name,size,fileSystemInfo,content.downloadUrl,file,parentReference" for nextLink != "" { var files Files _, err := d.Request(nextLink, http.MethodGet, nil, &files) diff --git a/drivers/onedrive_app/util.go b/drivers/onedrive_app/util.go index 28b34837..d036e131 100644 --- a/drivers/onedrive_app/util.go +++ b/drivers/onedrive_app/util.go @@ -118,7 +118,7 @@ func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallba func (d *OnedriveAPP) getFiles(path string) ([]File, error) { var res []File - nextLink := d.GetMetaUrl(false, path) + 
"/children?$top=5000&$expand=thumbnails($select=medium)&$select=id,name,size,lastModifiedDateTime,content.downloadUrl,file,parentReference" + nextLink := d.GetMetaUrl(false, path) + "/children?$top=1000&$expand=thumbnails($select=medium)&$select=id,name,size,lastModifiedDateTime,content.downloadUrl,file,parentReference" for nextLink != "" { var files Files _, err := d.Request(nextLink, http.MethodGet, nil, &files) From aa45a829146aef36dbebd564f155ccfcf115fba2 Mon Sep 17 00:00:00 2001 From: Shelton Zhu <498220739@qq.com> Date: Mon, 9 Dec 2024 23:33:07 +0800 Subject: [PATCH 040/187] fix(115): fix login bug (#7626 close #7614 close #7620) --- drivers/115/util.go | 5 ++--- drivers/115_share/utils.go | 2 +- go.mod | 4 ++-- go.sum | 11 ++++++----- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/115/util.go b/drivers/115/util.go index 33e34570..d7a1adff 100644 --- a/drivers/115/util.go +++ b/drivers/115/util.go @@ -27,8 +27,7 @@ import ( "github.com/pkg/errors" ) -//var UserAgent = driver115.UA115Browser - +// var UserAgent = driver115.UA115Browser func (d *Pan115) login() error { var err error opts := []driver115.Option{ @@ -46,7 +45,7 @@ func (d *Pan115) login() error { if cr, err = d.client.QRCodeLoginWithApp(s, driver115.LoginApp(d.QRCodeSource)); err != nil { return errors.Wrap(err, "failed to login by qrcode") } - d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s", cr.UID, cr.CID, cr.SEID) + d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s;KID=%s", cr.UID, cr.CID, cr.SEID, cr.KID) d.QRCodeToken = "" } else if d.Cookie != "" { if err = cr.FromCookie(d.Cookie); err != nil { diff --git a/drivers/115_share/utils.go b/drivers/115_share/utils.go index 812352ef..1f9e112d 100644 --- a/drivers/115_share/utils.go +++ b/drivers/115_share/utils.go @@ -96,7 +96,7 @@ func (d *Pan115Share) login() error { if cr, err = d.client.QRCodeLoginWithApp(s, driver115.LoginApp(d.QRCodeSource)); err != nil { return errors.Wrap(err, "failed to login by qrcode") } - d.Cookie = 
fmt.Sprintf("UID=%s;CID=%s;SEID=%s", cr.UID, cr.CID, cr.SEID) + d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s;KID=%s", cr.UID, cr.CID, cr.SEID, cr.KID) d.QRCodeToken = "" } else if d.Cookie != "" { if err = cr.FromCookie(d.Cookie); err != nil { diff --git a/go.mod b/go.mod index 19bc7c2e..be631823 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/alist-org/alist/v3 go 1.22.4 require ( - github.com/SheltonZhu/115driver v1.0.29 + github.com/SheltonZhu/115driver v1.0.32 github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4 github.com/alist-org/gofakes3 v0.0.7 @@ -64,7 +64,7 @@ require ( golang.org/x/image v0.19.0 golang.org/x/net v0.28.0 golang.org/x/oauth2 v0.22.0 - golang.org/x/time v0.6.0 + golang.org/x/time v0.8.0 google.golang.org/appengine v1.6.8 gopkg.in/ldap.v3 v3.1.0 gorm.io/driver/mysql v1.5.7 diff --git a/go.sum b/go.sum index 78ac273a..f1ff39b3 100644 --- a/go.sum +++ b/go.sum @@ -7,8 +7,8 @@ github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc= github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM= github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= -github.com/SheltonZhu/115driver v1.0.29 h1:yFBqFDYJyADo3eG2RjJgSovnFd1OrpGHmsHBi6j0+r4= -github.com/SheltonZhu/115driver v1.0.29/go.mod h1:e3fPOBANbH/FsTya8FquJwOR3ErhCQgEab3q6CVY2k4= +github.com/SheltonZhu/115driver v1.0.32 h1:Taw1bnfcPJZW0xTdhDvEbBS1tccif7J7DslRp2NkDyQ= +github.com/SheltonZhu/115driver v1.0.32/go.mod h1:XXFi23pyhAgzUE8dUEKdGvIdUQKi3wv6zR7C1Do40D8= github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A= github.com/Unknwon/goconfig v1.0.0/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw= github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 
h1:h6q5E9aMBhhdqouW81LozVPI1I+Pu6IxL2EKpfm5OjY= @@ -393,6 +393,8 @@ github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831 h1:K3T3eu github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831/go.mod h1:lSHD4lC4zlMl+zcoysdJcd5KFzsWwOD8BJbyg1Ws9Ng= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= +github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= @@ -512,8 +514,6 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0= -github.com/xhofe/tache v0.1.2 h1:pHrXlrWcbTb4G7hVUDW7Rc+YTUnLJvnLBrdktVE1Fqg= -github.com/xhofe/tache v0.1.2/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ= github.com/xhofe/tache v0.1.3 h1:MipxzlljYX29E1YI/SLC7hVomVF+51iP1OUzlsuq1wE= github.com/xhofe/tache v0.1.3/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ= github.com/xhofe/wopan-sdk-go v0.1.3 h1:J58X6v+n25ewBZjb05pKOr7AWGohb+Rdll4CThGh6+A= @@ -663,8 +663,9 @@ golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= From 088120df8253623f937fe76cfdf4b471929a4f3b Mon Sep 17 00:00:00 2001 From: Joseph Chris Date: Mon, 9 Dec 2024 07:33:46 -0800 Subject: [PATCH 041/187] feat(sso): add custom extra scope support (#7577) --- internal/bootstrap/data/setting.go | 1 + internal/conf/const.go | 1 + server/handles/ssologin.go | 9 +++++++-- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/internal/bootstrap/data/setting.go b/internal/bootstrap/data/setting.go index 920a7a2d..f1b98a70 100644 --- a/internal/bootstrap/data/setting.go +++ b/internal/bootstrap/data/setting.go @@ -164,6 +164,7 @@ func InitialSettings() []model.SettingItem { {Key: conf.SSOApplicationName, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE}, {Key: conf.SSOEndpointName, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE}, {Key: conf.SSOJwtPublicKey, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE}, + {Key: conf.SSOExtraScopes, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE}, {Key: conf.SSOAutoRegister, Value: "false", Type: conf.TypeBool, Group: model.SSO, Flag: model.PRIVATE}, {Key: conf.SSODefaultDir, Value: "/", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE}, {Key: 
conf.SSODefaultPermission, Value: "0", Type: conf.TypeNumber, Group: model.SSO, Flag: model.PRIVATE}, diff --git a/internal/conf/const.go b/internal/conf/const.go index 13787b5e..499e0a4f 100644 --- a/internal/conf/const.go +++ b/internal/conf/const.go @@ -72,6 +72,7 @@ const ( SSOApplicationName = "sso_application_name" SSOEndpointName = "sso_endpoint_name" SSOJwtPublicKey = "sso_jwt_public_key" + SSOExtraScopes = "sso_extra_scopes" SSOAutoRegister = "sso_auto_register" SSODefaultDir = "sso_default_dir" SSODefaultPermission = "sso_default_permission" diff --git a/server/handles/ssologin.go b/server/handles/ssologin.go index cb5fc4ca..62bd4aaa 100644 --- a/server/handles/ssologin.go +++ b/server/handles/ssologin.go @@ -4,13 +4,14 @@ import ( "encoding/base64" "errors" "fmt" - "github.com/Xhofe/go-cache" "net/http" "net/url" "path" "strings" "time" + "github.com/Xhofe/go-cache" + "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/db" "github.com/alist-org/alist/v3/internal/model" @@ -123,6 +124,10 @@ func GetOIDCClient(c *gin.Context, useCompatibility bool, redirectUri, method st } clientId := setting.GetStr(conf.SSOClientId) clientSecret := setting.GetStr(conf.SSOClientSecret) + extraScopes := []string{} + if setting.GetStr(conf.SSOExtraScopes) != "" { + extraScopes = strings.Split(setting.GetStr(conf.SSOExtraScopes), " ") + } return &oauth2.Config{ ClientID: clientId, ClientSecret: clientSecret, @@ -132,7 +137,7 @@ func GetOIDCClient(c *gin.Context, useCompatibility bool, redirectUri, method st Endpoint: provider.Endpoint(), // "openid" is a required scope for OpenID Connect flows. 
- Scopes: []string{oidc.ScopeOpenID, "profile"}, + Scopes: append([]string{oidc.ScopeOpenID, "profile"}, extraScopes...), }, nil } From 016e169c41dcc1ce255c86d3c391526080356305 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Mon, 9 Dec 2024 23:34:29 +0800 Subject: [PATCH 042/187] feat(139): support multipart upload (close: #7444) (#7630) * feat(139): support multipart upload (close: #7444) * feat(139): add custom upload part size option --- drivers/139/driver.go | 135 +++++++++++++++++++++++++++++++----------- drivers/139/meta.go | 5 +- drivers/139/types.go | 19 ++++++ 3 files changed, 122 insertions(+), 37 deletions(-) diff --git a/drivers/139/driver.go b/drivers/139/driver.go index d33c3d77..2fedc477 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -357,7 +357,10 @@ const ( TB ) -func getPartSize(size int64) int64 { +func (d *Yun139) getPartSize(size int64) int64 { + if d.CustomUploadPartSize != 0 { + return d.CustomUploadPartSize + } // 网盘对于分片数量存在上限 if size/GB > 30 { return 512 * MB @@ -380,24 +383,51 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr return err } } - // return errs.NotImplement + + partInfos := []PartInfo{} + var partSize = d.getPartSize(stream.GetSize()) + part := (stream.GetSize() + partSize - 1) / partSize + if part == 0 { + part = 1 + } + for i := int64(0); i < part; i++ { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + start := i * partSize + byteSize := stream.GetSize() - start + if byteSize > partSize { + byteSize = partSize + } + partNumber := i + 1 + partInfo := PartInfo{ + PartNumber: partNumber, + PartSize: byteSize, + ParallelHashCtx: ParallelHashCtx{ + PartOffset: start, + }, + } + partInfos = append(partInfos, partInfo) + } + + // 筛选出前 100 个 partInfos + firstPartInfos := partInfos + if len(firstPartInfos) > 100 { + firstPartInfos = firstPartInfos[:100] + } + + // 获取上传信息和前100个分片的上传地址 data := base.Json{ "contentHash": fullHash, "contentHashAlgorithm": "SHA256", "contentType": 
"application/octet-stream", "parallelUpload": false, - "partInfos": []base.Json{{ - "parallelHashCtx": base.Json{ - "partOffset": 0, - }, - "partNumber": 1, - "partSize": stream.GetSize(), - }}, - "size": stream.GetSize(), - "parentFileId": dstDir.GetID(), - "name": stream.GetName(), - "type": "file", - "fileRenameMode": "auto_rename", + "partInfos": firstPartInfos, + "size": stream.GetSize(), + "parentFileId": dstDir.GetID(), + "name": stream.GetName(), + "type": "file", + "fileRenameMode": "auto_rename", } pathname := "/hcy/file/create" var resp PersonalUploadResp @@ -410,32 +440,67 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr return nil } + uploadPartInfos := resp.Data.PartInfos + + // 获取后续分片的上传地址 + for i := 101; i < len(partInfos); i += 100 { + end := i + 100 + if end > len(partInfos) { + end = len(partInfos) + } + batchPartInfos := partInfos[i:end] + + moredata := base.Json{ + "fileId": resp.Data.FileId, + "uploadId": resp.Data.UploadId, + "partInfos": batchPartInfos, + "commonAccountInfo": base.Json{ + "account": d.Account, + "accountType": 1, + }, + } + pathname := "/hcy/file/getUploadUrl" + var moreresp PersonalUploadUrlResp + _, err = d.personalPost(pathname, moredata, &moreresp) + if err != nil { + return err + } + uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...) 
+ } + // Progress p := driver.NewProgress(stream.GetSize(), up) - // Update Progress - r := io.TeeReader(stream, p) + // 上传所有分片 + for _, uploadPartInfo := range uploadPartInfos { + index := uploadPartInfo.PartNumber - 1 + partSize := partInfos[index].PartSize + log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos)) + limitReader := io.LimitReader(stream, partSize) - req, err := http.NewRequest("PUT", resp.Data.PartInfos[0].UploadUrl, r) - if err != nil { - return err - } - req = req.WithContext(ctx) - req.Header.Set("Content-Type", "application/octet-stream") - req.Header.Set("Content-Length", fmt.Sprint(stream.GetSize())) - req.Header.Set("Origin", "https://yun.139.com") - req.Header.Set("Referer", "https://yun.139.com/") - req.ContentLength = stream.GetSize() + // Update Progress + r := io.TeeReader(limitReader, p) - res, err := base.HttpClient.Do(req) - if err != nil { - return err - } + req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r) + if err != nil { + return err + } + req = req.WithContext(ctx) + req.Header.Set("Content-Type", "application/octet-stream") + req.Header.Set("Content-Length", fmt.Sprint(partSize)) + req.Header.Set("Origin", "https://yun.139.com") + req.Header.Set("Referer", "https://yun.139.com/") + req.ContentLength = partSize - _ = res.Body.Close() - log.Debugf("%+v", res) - if res.StatusCode != http.StatusOK { - return fmt.Errorf("unexpected status code: %d", res.StatusCode) + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + _ = res.Body.Close() + log.Debugf("[139] uploaded: %+v", res) + if res.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected status code: %d", res.StatusCode) + } } data = base.Json{ @@ -496,7 +561,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr // Progress p := driver.NewProgress(stream.GetSize(), up) - var partSize = getPartSize(stream.GetSize()) + var partSize = d.getPartSize(stream.GetSize()) part := (stream.GetSize() 
+ partSize - 1) / partSize if part == 0 { part = 1 diff --git a/drivers/139/meta.go b/drivers/139/meta.go index 56a4c1df..680e469d 100644 --- a/drivers/139/meta.go +++ b/drivers/139/meta.go @@ -9,8 +9,9 @@ type Addition struct { //Account string `json:"account" required:"true"` Authorization string `json:"authorization" type:"text" required:"true"` driver.RootID - Type string `json:"type" type:"select" options:"personal,family,personal_new" default:"personal"` - CloudID string `json:"cloud_id"` + Type string `json:"type" type:"select" options:"personal,family,personal_new" default:"personal"` + CloudID string `json:"cloud_id"` + CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"` } var config = driver.Config{ diff --git a/drivers/139/types.go b/drivers/139/types.go index f7971966..42b939bf 100644 --- a/drivers/139/types.go +++ b/drivers/139/types.go @@ -196,6 +196,16 @@ type QueryContentListResp struct { } `json:"data"` } +type ParallelHashCtx struct { + PartOffset int64 `json:"partOffset"` +} + +type PartInfo struct { + PartNumber int64 `json:"partNumber"` + PartSize int64 `json:"partSize"` + ParallelHashCtx ParallelHashCtx `json:"parallelHashCtx"` +} + type PersonalThumbnail struct { Style string `json:"style"` Url string `json:"url"` @@ -235,6 +245,15 @@ type PersonalUploadResp struct { } } +type PersonalUploadUrlResp struct { + BaseResp + Data struct { + FileId string `json:"fileId"` + UploadId string `json:"uploadId"` + PartInfos []PersonalPartInfo `json:"partInfos"` + } +} + type RefreshTokenResp struct { XMLName xml.Name `xml:"root"` Return string `xml:"return"` From 2a035302b2e73c87f57945d66300c9a10f1c8127 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Mon, 9 Dec 2024 23:35:44 +0800 Subject: [PATCH 043/187] fix(cloudreve): support upload to remote and OneDrive storage (#7632 close #6882) - Add support for remote and OneDrive storage types - Implement new upload methods for different storage types - 
Update driver to handle various storage policies - Add error handling and session cleanup for failed uploads --- drivers/cloudreve/driver.go | 73 +++++++++++++++++---------- drivers/cloudreve/types.go | 8 +-- drivers/cloudreve/util.go | 99 +++++++++++++++++++++++++++++++++++++ 3 files changed, 150 insertions(+), 30 deletions(-) diff --git a/drivers/cloudreve/driver.go b/drivers/cloudreve/driver.go index ec0f6ef2..8fc117ac 100644 --- a/drivers/cloudreve/driver.go +++ b/drivers/cloudreve/driver.go @@ -10,6 +10,7 @@ import ( "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" @@ -134,6 +135,8 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File if io.ReadCloser(stream) == http.NoBody { return d.create(ctx, dstDir, stream) } + + // 获取存储策略 var r DirectoryResp err := d.request(http.MethodGet, "/directory"+dstDir.GetPath(), nil, &r) if err != nil { @@ -146,6 +149,8 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File "policy_id": r.Policy.Id, "last_modified": stream.ModTime().Unix(), } + + // 获取上传会话信息 var u UploadInfo err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) { req.SetBody(uploadBody) @@ -153,36 +158,50 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File if err != nil { return err } - var chunkSize = u.ChunkSize - var buf []byte - var chunk int - for { - var n int - buf = make([]byte, chunkSize) - n, err = io.ReadAtLeast(stream, buf, chunkSize) - if err != nil && err != io.ErrUnexpectedEOF { - if err == io.EOF { - return nil + + // 根据存储方式选择分片上传的方法 + switch r.Policy.Type { + case "onedrive": + err = d.upOneDrive(ctx, stream, u, up) + case "remote": // 从机存储 + err = d.upRemote(ctx, stream, u, up) + case "local": // 本机存储 + var chunkSize = 
u.ChunkSize + var buf []byte + var chunk int + for { + var n int + buf = make([]byte, chunkSize) + n, err = io.ReadAtLeast(stream, buf, chunkSize) + if err != nil && err != io.ErrUnexpectedEOF { + if err == io.EOF { + return nil + } + return err } - return err + if n == 0 { + break + } + buf = buf[:n] + err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) { + req.SetHeader("Content-Type", "application/octet-stream") + req.SetHeader("Content-Length", strconv.Itoa(n)) + req.SetBody(buf) + }, nil) + if err != nil { + break + } + chunk++ } - - if n == 0 { - break - } - buf = buf[:n] - err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) { - req.SetHeader("Content-Type", "application/octet-stream") - req.SetHeader("Content-Length", strconv.Itoa(n)) - req.SetBody(buf) - }, nil) - if err != nil { - break - } - chunk++ - + default: + err = errs.NotImplement } - return err + if err != nil { + // 删除失败的会话 + err = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil) + return err + } + return nil } func (d *Cloudreve) create(ctx context.Context, dir model.Obj, file model.Obj) error { diff --git a/drivers/cloudreve/types.go b/drivers/cloudreve/types.go index 241d993e..a7c3919e 100644 --- a/drivers/cloudreve/types.go +++ b/drivers/cloudreve/types.go @@ -21,9 +21,11 @@ type Policy struct { } type UploadInfo struct { - SessionID string `json:"sessionID"` - ChunkSize int `json:"chunkSize"` - Expires int `json:"expires"` + SessionID string `json:"sessionID"` + ChunkSize int `json:"chunkSize"` + Expires int `json:"expires"` + UploadURLs []string `json:"uploadURLs"` + Credential string `json:"credential,omitempty"` } type DirectoryResp struct { diff --git a/drivers/cloudreve/util.go b/drivers/cloudreve/util.go index 284e3289..b5b71153 100644 --- a/drivers/cloudreve/util.go +++ b/drivers/cloudreve/util.go @@ -1,16 +1,23 @@ package cloudreve import ( + 
"bytes" + "context" "encoding/base64" "errors" + "fmt" + "io" "net/http" + "strconv" "strings" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/setting" "github.com/alist-org/alist/v3/pkg/cookie" + "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" json "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go" @@ -172,3 +179,95 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) { Thumbnail: resp.Header().Get("Location"), }, nil } + +func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error { + uploadUrl := u.UploadURLs[0] + credential := u.Credential + var finish int64 = 0 + var chunk int = 0 + DEFAULT := int64(u.ChunkSize) + for finish < stream.GetSize() { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + utils.Log.Debugf("[Cloudreve-Remote] upload: %d", finish) + var byteSize = DEFAULT + left := stream.GetSize() - finish + if left < DEFAULT { + byteSize = left + } + byteData := make([]byte, byteSize) + n, err := io.ReadFull(stream, byteData) + utils.Log.Debug(err, n) + if err != nil { + return err + } + req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), bytes.NewBuffer(byteData)) + if err != nil { + return err + } + req = req.WithContext(ctx) + req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) + req.Header.Set("Authorization", fmt.Sprint(credential)) + finish += byteSize + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + res.Body.Close() + up(float64(finish) * 100 / float64(stream.GetSize())) + chunk++ + } + return nil +} + +func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error { + uploadUrl := u.UploadURLs[0] + var finish int64 = 0 + 
DEFAULT := int64(u.ChunkSize) + for finish < stream.GetSize() { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + utils.Log.Debugf("[Cloudreve-OneDrive] upload: %d", finish) + var byteSize = DEFAULT + left := stream.GetSize() - finish + if left < DEFAULT { + byteSize = left + } + byteData := make([]byte, byteSize) + n, err := io.ReadFull(stream, byteData) + utils.Log.Debug(err, n) + if err != nil { + return err + } + req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData)) + if err != nil { + return err + } + req = req.WithContext(ctx) + req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) + req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())) + finish += byteSize + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession + if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 { + data, _ := io.ReadAll(res.Body) + res.Body.Close() + return errors.New(string(data)) + } + res.Body.Close() + up(float64(finish) * 100 / float64(stream.GetSize())) + } + // 上传成功发送回调请求 + err := d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) { + req.SetBody("{}") + }, nil) + if err != nil { + return err + } + return nil +} From a3908fd9a650e93bada0c715af371fc7a7fed33d Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Mon, 9 Dec 2024 23:54:21 +0800 Subject: [PATCH 044/187] fix(139): update APIs (#7591 close #7603) * fix(139): update family cloud API * fix(139): update API of familyGetLink * feat(139): support group (close #7603) * docs: add `139 group` to Readme * feat(139): support multipart upload (close: #7444) * feat(139): add custom upload part size option * fix: missing right big quote --------- Co-authored-by: Andy Hsu --- README.md | 2 +- README_cn.md | 2 +- README_ja.md | 2 +- drivers/139/driver.go | 177 
+++++++++++++++++++++++++++++++++++++----- drivers/139/types.go | 23 ++++++ drivers/139/util.go | 88 ++++++++++++++++++++- 6 files changed, 268 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index 701bbc2f..8140f325 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing] - [x] WebDav(Support OneDrive/SharePoint without API) - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ )) - [x] [Mediatrack](https://www.mediatrack.cn/) - - [x] [139yun](https://yun.139.com/) (Personal, Family) + - [x] [139yun](https://yun.139.com/) (Personal, Family, Group) - [x] [YandexDisk](https://disk.yandex.com/) - [x] [BaiduNetdisk](http://pan.baidu.com/) - [x] [Terabox](https://www.terabox.com/main) diff --git a/README_cn.md b/README_cn.md index 7e45d60f..5c71ccce 100644 --- a/README_cn.md +++ b/README_cn.md @@ -58,7 +58,7 @@ - [x] WebDav(支持无API的OneDrive/SharePoint) - [x] Teambition([中国](https://www.teambition.com/ ),[国际](https://us.teambition.com/ )) - [x] [分秒帧](https://www.mediatrack.cn/) - - [x] [和彩云](https://yun.139.com/) (个人云, 家庭云) + - [x] [和彩云](https://yun.139.com/) (个人云, 家庭云,共享群组) - [x] [Yandex.Disk](https://disk.yandex.com/) - [x] [百度网盘](http://pan.baidu.com/) - [x] [UC网盘](https://drive.uc.cn) diff --git a/README_ja.md b/README_ja.md index 453e7b99..cd4446fa 100644 --- a/README_ja.md +++ b/README_ja.md @@ -58,7 +58,7 @@ - [x] WebDav(Support OneDrive/SharePoint without API) - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ )) - [x] [Mediatrack](https://www.mediatrack.cn/) - - [x] [139yun](https://yun.139.com/) (Personal, Family) + - [x] [139yun](https://yun.139.com/) (Personal, Family, Group) - [x] [YandexDisk](https://disk.yandex.com/) - [x] [BaiduNetdisk](http://pan.baidu.com/) - [x] [Terabox](https://www.terabox.com/main) diff --git a/drivers/139/driver.go b/drivers/139/driver.go index 
2fedc477..8862983c 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "path" "strconv" "strings" "time" @@ -14,15 +15,16 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/cron" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/pkg/utils/random" log "github.com/sirupsen/logrus" ) type Yun139 struct { model.Storage Addition - cron *cron.Cron + cron *cron.Cron Account string } @@ -56,6 +58,11 @@ func (d *Yun139) Init(ctx context.Context) error { d.RootFolderID = "root" } fallthrough + case MetaGroup: + if len(d.Addition.RootFolderID) == 0 { + d.RootFolderID = d.CloudID + } + fallthrough case MetaFamily: decode, err := base64.StdEncoding.DecodeString(d.Authorization) if err != nil { @@ -96,6 +103,8 @@ func (d *Yun139) List(ctx context.Context, dir model.Obj, args model.ListArgs) ( return d.getFiles(dir.GetID()) case MetaFamily: return d.familyGetFiles(dir.GetID()) + case MetaGroup: + return d.groupGetFiles(dir.GetID()) default: return nil, errs.NotImplement } @@ -108,9 +117,11 @@ func (d *Yun139) Link(ctx context.Context, file model.Obj, args model.LinkArgs) case MetaPersonalNew: url, err = d.personalGetLink(file.GetID()) case MetaPersonal: - fallthrough - case MetaFamily: url, err = d.getLink(file.GetID()) + case MetaFamily: + url, err = d.familyGetLink(file.GetID(), file.GetPath()) + case MetaGroup: + url, err = d.groupGetLink(file.GetID(), file.GetPath()) default: return nil, errs.NotImplement } @@ -154,8 +165,22 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin "accountType": 1, }, "docLibName": dirName, + "path": path.Join(parentDir.GetPath(), parentDir.GetID()), } - pathname := "/orchestration/familyCloud/cloudCatalog/v1.0/createCloudDoc" + pathname := 
"/orchestration/familyCloud-rebuild/cloudCatalog/v1.0/createCloudDoc" + _, err = d.post(pathname, data, nil) + case MetaGroup: + data := base.Json{ + "catalogName": dirName, + "commonAccountInfo": base.Json{ + "account": d.Account, + "accountType": 1, + }, + "groupID": d.CloudID, + "parentFileId": parentDir.GetID(), + "path": path.Join(parentDir.GetPath(), parentDir.GetID()), + } + pathname := "/orchestration/group-rebuild/catalog/v1.0/createGroupCatalog" _, err = d.post(pathname, data, nil) default: err = errs.NotImplement @@ -176,6 +201,34 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, return nil, err } return srcObj, nil + case MetaGroup: + var contentList []string + var catalogList []string + if srcObj.IsDir() { + catalogList = append(catalogList, srcObj.GetID()) + } else { + contentList = append(contentList, srcObj.GetID()) + } + data := base.Json{ + "taskType": 3, + "srcType": 2, + "srcGroupID": d.CloudID, + "destType": 2, + "destGroupID": d.CloudID, + "destPath": dstDir.GetPath(), + "contentList": contentList, + "catalogList": catalogList, + "commonAccountInfo": base.Json{ + "account": d.Account, + "accountType": 1, + }, + } + pathname := "/orchestration/group-rebuild/task/v1.0/createBatchOprTask" + _, err := d.post(pathname, data, nil) + if err != nil { + return nil, err + } + return srcObj, nil case MetaPersonal: var contentInfoList []string var catalogInfoList []string @@ -246,6 +299,65 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e pathname = "/orchestration/personalCloud/content/v1.0/updateContentInfo" } _, err = d.post(pathname, data, nil) + case MetaGroup: + var data base.Json + var pathname string + if srcObj.IsDir() { + data = base.Json{ + "groupID": d.CloudID, + "modifyCatalogID": srcObj.GetID(), + "modifyCatalogName": newName, + "path": srcObj.GetPath(), + "commonAccountInfo": base.Json{ + "account": d.Account, + "accountType": 1, + }, + } + pathname = 
"/orchestration/group-rebuild/catalog/v1.0/modifyGroupCatalog" + } else { + data = base.Json{ + "groupID": d.CloudID, + "contentID": srcObj.GetID(), + "contentName": newName, + "path": srcObj.GetPath(), + "commonAccountInfo": base.Json{ + "account": d.Account, + "accountType": 1, + }, + } + pathname = "/orchestration/group-rebuild/content/v1.0/modifyGroupContent" + } + _, err = d.post(pathname, data, nil) + case MetaFamily: + var data base.Json + var pathname string + if srcObj.IsDir() { + // 网页接口不支持重命名家庭云文件夹 + // data = base.Json{ + // "catalogType": 3, + // "catalogID": srcObj.GetID(), + // "catalogName": newName, + // "commonAccountInfo": base.Json{ + // "account": d.Account, + // "accountType": 1, + // }, + // "path": srcObj.GetPath(), + // } + // pathname = "/orchestration/familyCloud-rebuild/photoContent/v1.0/modifyCatalogInfo" + return errs.NotImplement + } else { + data = base.Json{ + "contentID": srcObj.GetID(), + "contentName": newName, + "commonAccountInfo": base.Json{ + "account": d.Account, + "accountType": 1, + }, + "path": srcObj.GetPath(), + } + pathname = "/orchestration/familyCloud-rebuild/photoContent/v1.0/modifyContentInfo" + } + _, err = d.post(pathname, data, nil) default: err = errs.NotImplement } @@ -303,6 +415,28 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error { pathname := "/hcy/recyclebin/batchTrash" _, err := d.personalPost(pathname, data, nil) return err + case MetaGroup: + var contentList []string + var catalogList []string + // 必须使用完整路径删除 + if obj.IsDir() { + catalogList = append(catalogList, obj.GetPath()) + } else { + contentList = append(contentList, path.Join(obj.GetPath(), obj.GetID())) + } + data := base.Json{ + "taskType": 2, + "srcGroupID": d.CloudID, + "contentList": contentList, + "catalogList": catalogList, + "commonAccountInfo": base.Json{ + "account": d.Account, + "accountType": 1, + }, + } + pathname := "/orchestration/group-rebuild/task/v1.0/createBatchOprTask" + _, err := d.post(pathname, data, 
nil) + return err case MetaPersonal: fallthrough case MetaFamily: @@ -337,10 +471,12 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error { "account": d.Account, "accountType": 1, }, + "sourceCloudID": d.CloudID, "sourceCatalogType": 1002, "taskType": 2, + "path": obj.GetPath(), } - pathname = "/orchestration/familyCloud/batchOprTask/v1.0/createBatchOprTask" + pathname = "/orchestration/familyCloud-rebuild/batchOprTask/v1.0/createBatchOprTask" } _, err := d.post(pathname, data, nil) return err @@ -536,21 +672,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr } pathname := "/orchestration/personalCloud/uploadAndDownload/v1.0/pcUploadFileRequest" if d.isFamily() { - // data = d.newJson(base.Json{ - // "fileCount": 1, - // "manualRename": 2, - // "operation": 0, - // "path": "", - // "seqNo": "", - // "totalSize": 0, - // "uploadContentList": []base.Json{{ - // "contentName": stream.GetName(), - // "contentSize": 0, - // // "digest": "5a3231986ce7a6b46e408612d385bafa" - // }}, - // }) - // pathname = "/orchestration/familyCloud/content/v1.0/getFileUploadURL" - return errs.NotImplement + data = d.newJson(base.Json{ + "fileCount": 1, + "manualRename": 2, + "operation": 0, + "path": path.Join(dstDir.GetPath(), dstDir.GetID()), + "seqNo": random.String(32), //序列号不能为空 + "totalSize": 0, + "uploadContentList": []base.Json{{ + "contentName": stream.GetName(), + "contentSize": 0, + // "digest": "5a3231986ce7a6b46e408612d385bafa" + }}, + }) + pathname = "/orchestration/familyCloud-rebuild/content/v1.0/getFileUploadURL" } var resp UploadResp _, err := d.post(pathname, data, &resp) diff --git a/drivers/139/types.go b/drivers/139/types.go index 42b939bf..c34cba03 100644 --- a/drivers/139/types.go +++ b/drivers/139/types.go @@ -7,6 +7,7 @@ import ( const ( MetaPersonal string = "personal" MetaFamily string = "family" + MetaGroup string = "group" MetaPersonalNew string = "personal_new" ) @@ -54,6 +55,7 @@ type Content struct { 
//ContentDesc string `json:"contentDesc"` //ContentType int `json:"contentType"` //ContentOrigin int `json:"contentOrigin"` + CreateTime string `json:"createTime"` UpdateTime string `json:"updateTime"` //CommentCount int `json:"commentCount"` ThumbnailURL string `json:"thumbnailURL"` @@ -196,6 +198,27 @@ type QueryContentListResp struct { } `json:"data"` } +type QueryGroupContentListResp struct { + BaseResp + Data struct { + Result struct { + ResultCode string `json:"resultCode"` + ResultDesc string `json:"resultDesc"` + } `json:"result"` + GetGroupContentResult struct { + ParentCatalogID string `json:"parentCatalogID"` // 根目录是"0" + CatalogList []struct { + Catalog + Path string `json:"path"` + } `json:"catalogList"` + ContentList []Content `json:"contentList"` + NodeCount int `json:"nodeCount"` // 文件+文件夹数量 + CtlgCnt int `json:"ctlgCnt"` // 文件夹数量 + ContCnt int `json:"contCnt"` // 文件数量 + } `json:"getGroupContentResult"` + } `json:"data"` +} + type ParallelHashCtx struct { PartOffset int64 `json:"partOffset"` } diff --git a/drivers/139/util.go b/drivers/139/util.go index 5918e4c5..ccb6a912 100644 --- a/drivers/139/util.go +++ b/drivers/139/util.go @@ -13,9 +13,9 @@ import ( "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils/random" - "github.com/alist-org/alist/v3/internal/op" "github.com/go-resty/resty/v2" jsoniter "github.com/json-iterator/go" log "github.com/sirupsen/logrus" @@ -220,10 +220,11 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) { "sortDirection": 1, }) var resp QueryContentListResp - _, err := d.post("/orchestration/familyCloud/content/v1.0/queryContentList", data, &resp) + _, err := d.post("/orchestration/familyCloud-rebuild/content/v1.2/queryContentList", data, &resp) if err != nil { return nil, err } + path := resp.Data.Path for _, catalog := range 
resp.Data.CloudCatalogList { f := model.Object{ ID: catalog.CatalogID, @@ -232,6 +233,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) { IsFolder: true, Modified: getTime(catalog.LastUpdateTime), Ctime: getTime(catalog.CreateTime), + Path: path, // 文件夹上一级的Path } files = append(files, &f) } @@ -243,6 +245,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) { Size: content.ContentSize, Modified: getTime(content.LastUpdateTime), Ctime: getTime(content.CreateTime), + Path: path, // 文件所在目录的Path }, Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL}, //Thumbnail: content.BigthumbnailURL, @@ -257,6 +260,61 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) { return files, nil } +func (d *Yun139) groupGetFiles(catalogID string) ([]model.Obj, error) { + pageNum := 1 + files := make([]model.Obj, 0) + for { + data := d.newJson(base.Json{ + "groupID": d.CloudID, + "catalogID": catalogID, + "contentSortType": 0, + "sortDirection": 1, + "startNumber": pageNum, + "endNumber": pageNum + 99, + "path": catalogID, + }) + + var resp QueryGroupContentListResp + _, err := d.post("/orchestration/group-rebuild/content/v1.0/queryGroupContentList", data, &resp) + if err != nil { + return nil, err + } + path := resp.Data.GetGroupContentResult.ParentCatalogID + for _, catalog := range resp.Data.GetGroupContentResult.CatalogList { + f := model.Object{ + ID: catalog.CatalogID, + Name: catalog.CatalogName, + Size: 0, + IsFolder: true, + Modified: getTime(catalog.UpdateTime), + Ctime: getTime(catalog.CreateTime), + Path: catalog.Path, // 文件夹的真实Path, root:/开头 + } + files = append(files, &f) + } + for _, content := range resp.Data.GetGroupContentResult.ContentList { + f := model.ObjThumb{ + Object: model.Object{ + ID: content.ContentID, + Name: content.ContentName, + Size: content.ContentSize, + Modified: getTime(content.UpdateTime), + Ctime: getTime(content.CreateTime), + Path: path, // 文件所在目录的Path + }, + Thumbnail: 
model.Thumbnail{Thumbnail: content.ThumbnailURL}, + //Thumbnail: content.BigthumbnailURL, + } + files = append(files, &f) + } + if pageNum > resp.Data.GetGroupContentResult.NodeCount { + break + } + pageNum = pageNum + 100 + } + return files, nil +} + func (d *Yun139) getLink(contentId string) (string, error) { data := base.Json{ "appName": "", @@ -273,6 +331,32 @@ func (d *Yun139) getLink(contentId string) (string, error) { } return jsoniter.Get(res, "data", "downloadURL").ToString(), nil } +func (d *Yun139) familyGetLink(contentId string, path string) (string, error) { + data := d.newJson(base.Json{ + "contentID": contentId, + "path": path, + }) + res, err := d.post("/orchestration/familyCloud-rebuild/content/v1.0/getFileDownLoadURL", + data, nil) + if err != nil { + return "", err + } + return jsoniter.Get(res, "data", "downloadURL").ToString(), nil +} + +func (d *Yun139) groupGetLink(contentId string, path string) (string, error) { + data := d.newJson(base.Json{ + "contentID": contentId, + "groupID": d.CloudID, + "path": path, + }) + res, err := d.post("/orchestration/group-rebuild/groupManage/v1.0/getGroupFileDownLoadURL", + data, nil) + if err != nil { + return "", err + } + return jsoniter.Get(res, "data", "downloadURL").ToString(), nil +} func unicode(str string) string { textQuoted := strconv.QuoteToASCII(str) From 734184649991a7319bc857a258e4e2e65e941ec6 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Tue, 10 Dec 2024 19:30:50 +0800 Subject: [PATCH 045/187] perf(task): merge requests of operating selected (#7637) --- server/handles/task.go | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/server/handles/task.go b/server/handles/task.go index 71b4c622..5f996505 100644 --- a/server/handles/task.go +++ b/server/handles/task.go @@ -90,6 +90,31 @@ func getTargetedHandler[T task.TaskInfoWithCreator](manager *tache.Manager[T], c } } +func getBatchHandler[T task.TaskInfoWithCreator](manager 
*tache.Manager[T], callback func(task T)) gin.HandlerFunc { + return func(c *gin.Context) { + isAdmin, uid, ok := getUserInfo(c) + if !ok { + common.ErrorStrResp(c, "user invalid", 401) + return + } + var tids []string + if err := c.ShouldBind(&tids); err != nil { + common.ErrorStrResp(c, "invalid request format", 400) + return + } + retErrs := make(map[string]string) + for _, tid := range tids { + t, ok := manager.GetByID(tid) + if !ok || (!isAdmin && uid != t.GetCreator().ID) { + retErrs[tid] = "task not found" + continue + } + callback(t) + } + common.SuccessResp(c, retErrs) + } +} + func taskRoute[T task.TaskInfoWithCreator](g *gin.RouterGroup, manager *tache.Manager[T]) { g.GET("/undone", func(c *gin.Context) { isAdmin, uid, ok := getUserInfo(c) @@ -132,6 +157,15 @@ func taskRoute[T task.TaskInfoWithCreator](g *gin.RouterGroup, manager *tache.Ma manager.Retry(task.GetID()) common.SuccessResp(c) })) + g.POST("/cancel_some", getBatchHandler(manager, func(task T) { + manager.Cancel(task.GetID()) + })) + g.POST("/delete_some", getBatchHandler(manager, func(task T) { + manager.Remove(task.GetID()) + })) + g.POST("/retry_some", getBatchHandler(manager, func(task T) { + manager.Retry(task.GetID()) + })) g.POST("/clear_done", func(c *gin.Context) { isAdmin, uid, ok := getUserInfo(c) if !ok { From 650b03aeb1feea8b51ebc6b4b48ffb885fdc41d6 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Tue, 10 Dec 2024 20:17:46 +0800 Subject: [PATCH 046/187] feat: ftp server support (#7634 close #1898) * feat: ftp server support * fix(ftp): incorrect mode for dirs in LIST returns --- cmd/server.go | 29 +++ go.mod | 7 +- go.sum | 17 ++ internal/bootstrap/data/setting.go | 10 + internal/conf/config.go | 26 +++ internal/conf/const.go | 9 + internal/model/setting.go | 1 + internal/model/user.go | 8 + server/ftp.go | 285 +++++++++++++++++++++++++++++ server/ftp/afero.go | 91 +++++++++ server/ftp/fsmanage.go | 75 ++++++++ server/ftp/fsread.go | 188 +++++++++++++++++++ 
server/ftp/fsup.go | 91 +++++++++ 13 files changed, 835 insertions(+), 2 deletions(-) create mode 100644 server/ftp.go create mode 100644 server/ftp/afero.go create mode 100644 server/ftp/fsmanage.go create mode 100644 server/ftp/fsread.go create mode 100644 server/ftp/fsup.go diff --git a/cmd/server.go b/cmd/server.go index 8a7beafa..66b57952 100644 --- a/cmd/server.go +++ b/cmd/server.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" "net" "net/http" "os" @@ -112,6 +113,24 @@ the address is defined in config file`, } }() } + var ftpDriver *server.FtpMainDriver + var ftpServer *ftpserver.FtpServer + if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable { + var err error + ftpDriver, err = server.NewMainDriver() + if err != nil { + utils.Log.Fatalf("failed to start ftp driver: %s", err.Error()) + } else { + utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen) + go func() { + ftpServer = ftpserver.NewFtpServer(ftpDriver) + err = ftpServer.ListenAndServe() + if err != nil { + utils.Log.Fatalf("problem ftp server listening: %s", err.Error()) + } + }() + } + } // Wait for interrupt signal to gracefully shutdown the server with // a timeout of 1 second. 
quit := make(chan os.Signal, 1) @@ -152,6 +171,16 @@ the address is defined in config file`, } }() } + if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable && ftpServer != nil && ftpDriver != nil { + wg.Add(1) + go func() { + defer wg.Done() + ftpDriver.Stop() + if err := ftpServer.Stop(); err != nil { + utils.Log.Fatal("FTP server shutdown err: ", err) + } + }() + } wg.Wait() utils.Log.Println("Server exit") }, diff --git a/go.mod b/go.mod index be631823..259521e9 100644 --- a/go.mod +++ b/go.mod @@ -50,8 +50,9 @@ require ( github.com/pquerna/otp v1.4.0 github.com/rclone/rclone v1.67.0 github.com/sirupsen/logrus v1.9.3 + github.com/spf13/afero v1.11.0 github.com/spf13/cobra v1.8.1 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 github.com/u2takey/ffmpeg-go v0.5.0 github.com/upyun/go-sdk/v3 v3.0.4 @@ -75,6 +76,7 @@ require ( require ( github.com/BurntSushi/toml v0.3.1 // indirect + github.com/KirCute/ftpserverlib-pasvportmap v0.0.0-20241208190057-c9a7bf2571e2 // indirect github.com/blevesearch/go-faiss v1.0.20 // indirect github.com/blevesearch/zapx/v16 v16.1.5 // indirect github.com/bytedance/sonic/loader v0.1.1 // indirect @@ -83,6 +85,7 @@ require ( github.com/cloudwego/base64x v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect + github.com/fclairamb/go-log v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hekmon/cunits/v2 v2.1.0 // indirect github.com/ipfs/boxo v0.12.0 // indirect @@ -221,7 +224,7 @@ require ( go.etcd.io/bbolt v1.3.8 // indirect golang.org/x/arch v0.8.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect + golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.24.0 // indirect golang.org/x/text v0.18.0 // indirect golang.org/x/tools v0.24.0 // indirect diff --git a/go.sum b/go.sum index 
f1ff39b3..dcad05c9 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,11 @@ +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/KirCute/ftpserverlib-pasvportmap v0.0.0-20241208190057-c9a7bf2571e2 h1:P3MoQ1kDfbCjL6+MPd5K7wPdKB4nqMuLU6Mv0+tdWDA= +github.com/KirCute/ftpserverlib-pasvportmap v0.0.0-20241208190057-c9a7bf2571e2/go.mod h1:v0NgMtKDDi/6CM6r4P+daCljCW3eO9yS+Z+pZDTKo1E= github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE= github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc= github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM= @@ -144,6 +147,8 @@ github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJL github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= +github.com/fclairamb/go-log v0.5.0 h1:Gz9wSamEaA6lta4IU2cjJc2xSq5sV5VYSB5w/SUHhVc= +github.com/fclairamb/go-log v0.5.0/go.mod h1:XoRO1dYezpsGmLLkZE9I+sHqpqY65p8JA+Vqblb7k40= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= 
github.com/foxxorcat/mopan-sdk-go v0.1.6 h1:6J37oI4wMZLj8EPgSCcSTTIbnI5D6RCNW/srX8vQd1Y= @@ -168,6 +173,10 @@ github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -441,6 +450,8 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= +github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo= +github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc= github.com/shirou/gopsutil/v3 v3.24.4 
h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU= @@ -459,6 +470,8 @@ github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:s github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -481,6 +494,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 h1:Jtcrb09q0AVWe3BGe8qtuuGxNSHWGkTWr43kHTJ+CpA= github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= @@ -634,6 +649,8 @@ golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.25.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/internal/bootstrap/data/setting.go b/internal/bootstrap/data/setting.go index f1b98a70..206273b4 100644 --- a/internal/bootstrap/data/setting.go +++ b/internal/bootstrap/data/setting.go @@ -185,6 +185,16 @@ func InitialSettings() []model.SettingItem { {Key: conf.S3AccessKeyId, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE}, {Key: conf.S3SecretAccessKey, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE}, {Key: conf.S3Buckets, Value: "[]", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE}, + + //ftp settings + {Key: conf.FTPPublicHost, Value: "127.0.0.1", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPPasvPortMap, Value: "", Type: conf.TypeText, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPProxyUserAgent, Value: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " + + "Chrome/87.0.4280.88 Safari/537.36", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPMandatoryTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPImplicitTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPTLSPrivateKeyPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPTLSPublicCertPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE}, } 
initialSettingItems = append(initialSettingItems, tool.Tools.Items()...) if flags.Dev { diff --git a/internal/conf/config.go b/internal/conf/config.go index aa29e1f5..df6c0544 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -71,6 +71,19 @@ type S3 struct { SSL bool `json:"ssl" env:"SSL"` } +type FTP struct { + Enable bool `json:"enable" env:"ENABLE"` + Listen string `json:"listen" env:"LISTEN"` + FindPasvPortAttempts int `json:"find_pasv_port_attempts" env:"FIND_PASV_PORT_ATTEMPTS"` + ActiveTransferPortNon20 bool `json:"active_transfer_port_non_20" env:"ACTIVE_TRANSFER_PORT_NON_20"` + IdleTimeout int `json:"idle_timeout" env:"IDLE_TIMEOUT"` + ConnectionTimeout int `json:"connection_timeout" env:"CONNECTION_TIMEOUT"` + DisableActiveMode bool `json:"disable_active_mode" env:"DISABLE_ACTIVE_MODE"` + DefaultTransferBinary bool `json:"default_transfer_binary" env:"DEFAULT_TRANSFER_BINARY"` + EnableActiveConnIPCheck bool `json:"enable_active_conn_ip_check" env:"ENABLE_ACTIVE_CONN_IP_CHECK"` + EnablePasvConnIPCheck bool `json:"enable_pasv_conn_ip_check" env:"ENABLE_PASV_CONN_IP_CHECK"` +} + type Config struct { Force bool `json:"force" env:"FORCE"` SiteURL string `json:"site_url" env:"SITE_URL"` @@ -90,6 +103,7 @@ type Config struct { Tasks TasksConfig `json:"tasks" envPrefix:"TASKS_"` Cors Cors `json:"cors" envPrefix:"CORS_"` S3 S3 `json:"s3" envPrefix:"S3_"` + FTP FTP `json:"ftp" envPrefix:"FTP_"` } func DefaultConfig() *Config { @@ -159,5 +173,17 @@ func DefaultConfig() *Config { Port: 5246, SSL: false, }, + FTP: FTP{ + Enable: true, + Listen: ":5221", + FindPasvPortAttempts: 50, + ActiveTransferPortNon20: false, + IdleTimeout: 900, + ConnectionTimeout: 30, + DisableActiveMode: false, + DefaultTransferBinary: false, + EnableActiveConnIPCheck: true, + EnablePasvConnIPCheck: true, + }, } } diff --git a/internal/conf/const.go b/internal/conf/const.go index 499e0a4f..99e8c868 100644 --- a/internal/conf/const.go +++ b/internal/conf/const.go @@ -97,6 
+97,15 @@ const ( // qbittorrent QbittorrentUrl = "qbittorrent_url" QbittorrentSeedtime = "qbittorrent_seedtime" + + // ftp + FTPPublicHost = "ftp_public_host" + FTPPasvPortMap = "ftp_pasv_port_map" + FTPProxyUserAgent = "ftp_proxy_user_agent" + FTPMandatoryTLS = "ftp_mandatory_tls" + FTPImplicitTLS = "ftp_implicit_tls" + FTPTLSPrivateKeyPath = "ftp_tls_private_key_path" + FTPTLSPublicCertPath = "ftp_tls_public_cert_path" ) const ( diff --git a/internal/model/setting.go b/internal/model/setting.go index c474935e..9b60d98a 100644 --- a/internal/model/setting.go +++ b/internal/model/setting.go @@ -11,6 +11,7 @@ const ( SSO LDAP S3 + FTP ) const ( diff --git a/internal/model/user.go b/internal/model/user.go index 2d61a971..b4e876a4 100644 --- a/internal/model/user.go +++ b/internal/model/user.go @@ -117,6 +117,14 @@ func (u *User) CanWebdavManage() bool { return u.IsAdmin() || (u.Permission>>9)&1 == 1 } +func (u *User) CanFTPAccess() bool { + return (u.Permission>>10)&1 == 1 +} + +func (u *User) CanFTPManage() bool { + return (u.Permission>>11)&1 == 1 +} + func (u *User) JoinPath(reqPath string) (string, error) { return utils.JoinBasePath(u.BasePath, reqPath) } diff --git a/server/ftp.go b/server/ftp.go new file mode 100644 index 00000000..161ea63c --- /dev/null +++ b/server/ftp.go @@ -0,0 +1,285 @@ +package server + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/server/ftp" + "math/rand" + "net" + "net/http" + "os" + "strconv" + "strings" + "sync" +) + +type FtpMainDriver struct { + settings *ftpserver.Settings + proxyHeader *http.Header + clients map[uint32]ftpserver.ClientContext + shutdownLock sync.RWMutex + isShutdown bool + 
tlsConfig *tls.Config +} + +func NewMainDriver() (*FtpMainDriver, error) { + header := &http.Header{} + header.Add("User-Agent", setting.GetStr(conf.FTPProxyUserAgent)) + transferType := ftpserver.TransferTypeASCII + if conf.Conf.FTP.DefaultTransferBinary { + transferType = ftpserver.TransferTypeBinary + } + activeConnCheck := ftpserver.IPMatchDisabled + if conf.Conf.FTP.EnableActiveConnIPCheck { + activeConnCheck = ftpserver.IPMatchRequired + } + pasvConnCheck := ftpserver.IPMatchDisabled + if conf.Conf.FTP.EnablePasvConnIPCheck { + pasvConnCheck = ftpserver.IPMatchRequired + } + tlsRequired := ftpserver.ClearOrEncrypted + if setting.GetBool(conf.FTPImplicitTLS) { + tlsRequired = ftpserver.ImplicitEncryption + } else if setting.GetBool(conf.FTPMandatoryTLS) { + tlsRequired = ftpserver.MandatoryEncryption + } + tlsConf, err := getTlsConf(setting.GetStr(conf.FTPTLSPrivateKeyPath), setting.GetStr(conf.FTPTLSPublicCertPath)) + if err != nil && tlsRequired != ftpserver.ClearOrEncrypted { + return nil, fmt.Errorf("FTP mandatory TLS has been enabled, but the certificate failed to load: %w", err) + } + return &FtpMainDriver{ + settings: &ftpserver.Settings{ + ListenAddr: conf.Conf.FTP.Listen, + PublicHost: lookupIP(setting.GetStr(conf.FTPPublicHost)), + PassiveTransferPortGetter: newPortMapper(setting.GetStr(conf.FTPPasvPortMap)), + FindPasvPortAttempts: conf.Conf.FTP.FindPasvPortAttempts, + ActiveTransferPortNon20: conf.Conf.FTP.ActiveTransferPortNon20, + IdleTimeout: conf.Conf.FTP.IdleTimeout, + ConnectionTimeout: conf.Conf.FTP.ConnectionTimeout, + DisableMLSD: false, + DisableMLST: false, + DisableMFMT: true, + Banner: setting.GetStr(conf.Announcement), + TLSRequired: tlsRequired, + DisableLISTArgs: false, + DisableSite: true, + DisableActiveMode: conf.Conf.FTP.DisableActiveMode, + EnableHASH: false, + DisableSTAT: false, + DisableSYST: false, + EnableCOMB: false, + DefaultTransferType: transferType, + ActiveConnectionsCheck: activeConnCheck, + PasvConnectionsCheck: 
pasvConnCheck, + }, + proxyHeader: header, + clients: make(map[uint32]ftpserver.ClientContext), + shutdownLock: sync.RWMutex{}, + isShutdown: false, + tlsConfig: tlsConf, + }, nil +} + +func (d *FtpMainDriver) GetSettings() (*ftpserver.Settings, error) { + return d.settings, nil +} + +func (d *FtpMainDriver) ClientConnected(cc ftpserver.ClientContext) (string, error) { + if d.isShutdown || !d.shutdownLock.TryRLock() { + return "", errors.New("server has shutdown") + } + defer d.shutdownLock.RUnlock() + d.clients[cc.ID()] = cc + return "AList FTP Endpoint", nil +} + +func (d *FtpMainDriver) ClientDisconnected(cc ftpserver.ClientContext) { + err := cc.Close() + if err != nil { + utils.Log.Errorf("failed to close client: %v", err) + } + delete(d.clients, cc.ID()) +} + +func (d *FtpMainDriver) AuthUser(cc ftpserver.ClientContext, user, pass string) (ftpserver.ClientDriver, error) { + var userObj *model.User + var err error + if user == "anonymous" || user == "guest" { + userObj, err = op.GetGuest() + if err != nil { + return nil, err + } + } else { + userObj, err = op.GetUserByName(user) + if err != nil { + return nil, err + } + passHash := model.StaticHash(pass) + if err = userObj.ValidatePwdStaticHash(passHash); err != nil { + return nil, err + } + } + if userObj.Disabled || !userObj.CanFTPAccess() { + return nil, errors.New("user not allowed to access FTP") + } + + ctx := context.Background() + ctx = context.WithValue(ctx, "user", userObj) + if user == "anonymous" || user == "guest" { + ctx = context.WithValue(ctx, "meta_pass", pass) + } else { + ctx = context.WithValue(ctx, "meta_pass", "") + } + ctx = context.WithValue(ctx, "client_ip", cc.RemoteAddr().String()) + ctx = context.WithValue(ctx, "proxy_header", d.proxyHeader) + return ftp.NewAferoAdapter(ctx), nil +} + +func (d *FtpMainDriver) GetTLSConfig() (*tls.Config, error) { + if d.tlsConfig == nil { + return nil, errors.New("TLS config not provided") + } + return d.tlsConfig, nil +} + +func (d *FtpMainDriver) 
Stop() { + d.isShutdown = true + d.shutdownLock.Lock() + defer d.shutdownLock.Unlock() + for _, value := range d.clients { + _ = value.Close() + } +} + +func lookupIP(host string) string { + if host == "" || net.ParseIP(host) != nil { + return host + } + ips, err := net.LookupIP(host) + if err != nil || len(ips) == 0 { + utils.Log.Fatalf("given FTP public host is invalid, and the default value will be used: %v", err) + return "" + } + for _, ip := range ips { + if ip.To4() != nil { + return ip.String() + } + } + v6 := ips[0].String() + utils.Log.Warnf("no IPv4 record looked up, %s will be used as public host, and it might do not work.", v6) + return v6 +} + +func newPortMapper(str string) ftpserver.PasvPortGetter { + if str == "" { + return nil + } + pasvPortMappers := strings.Split(strings.Replace(str, "\n", ",", -1), ",") + type group struct { + ExposedStart int + ListenedStart int + Length int + } + groups := make([]group, len(pasvPortMappers)) + totalLength := 0 + convertToPorts := func(str string) (int, int, error) { + start, end, multi := strings.Cut(str, "-") + if multi { + si, err := strconv.Atoi(start) + if err != nil { + return 0, 0, err + } + ei, err := strconv.Atoi(end) + if err != nil { + return 0, 0, err + } + if ei < si || ei < 1024 || si < 1024 || ei > 65535 || si > 65535 { + return 0, 0, errors.New("invalid port") + } + return si, ei - si + 1, nil + } else { + ret, err := strconv.Atoi(str) + if err != nil { + return 0, 0, err + } else { + return ret, 1, nil + } + } + } + for i, mapper := range pasvPortMappers { + var err error + exposed, listened, mapped := strings.Cut(mapper, ":") + for { + if mapped { + var es, ls, el, ll int + es, el, err = convertToPorts(exposed) + if err != nil { + break + } + ls, ll, err = convertToPorts(listened) + if err != nil { + break + } + if el != ll { + err = errors.New("the number of exposed ports and listened ports does not match") + break + } + groups[i].ExposedStart = es + groups[i].ListenedStart = ls + 
groups[i].Length = el + totalLength += el + } else { + var start, length int + start, length, err = convertToPorts(mapper) + groups[i].ExposedStart = start + groups[i].ListenedStart = start + groups[i].Length = length + totalLength += length + } + break + } + if err != nil { + utils.Log.Fatalf("failed to convert FTP PASV port mapper %s: %v, the port mapper will be ignored.", mapper, err) + return nil + } + } + return func() (int, int, bool) { + idxPort := rand.Intn(totalLength) + for _, g := range groups { + if idxPort >= g.Length { + idxPort -= g.Length + } else { + return g.ExposedStart + idxPort, g.ListenedStart + idxPort, true + } + } + // unreachable + return 0, 0, false + } +} + +func getTlsConf(keyPath, certPath string) (*tls.Config, error) { + if keyPath == "" || certPath == "" { + return nil, errors.New("private key or certificate is not provided") + } + cert, err := os.ReadFile(certPath) + if err != nil { + return nil, err + } + key, err := os.ReadFile(keyPath) + if err != nil { + return nil, err + } + tlsCert, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + return &tls.Config{Certificates: []tls.Certificate{tlsCert}}, nil +} diff --git a/server/ftp/afero.go b/server/ftp/afero.go new file mode 100644 index 00000000..6eb4bf8e --- /dev/null +++ b/server/ftp/afero.go @@ -0,0 +1,91 @@ +package ftp + +import ( + "context" + "errors" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/spf13/afero" + "os" + "time" +) + +type AferoAdapter struct { + ctx context.Context +} + +func NewAferoAdapter(ctx context.Context) *AferoAdapter { + return &AferoAdapter{ctx: ctx} +} + +func (a *AferoAdapter) Create(_ string) (afero.File, error) { + // See also GetHandle + return nil, errs.NotImplement +} + +func (a *AferoAdapter) Mkdir(name string, _ os.FileMode) error { + return Mkdir(a.ctx, name) +} + +func (a *AferoAdapter) MkdirAll(path string, perm os.FileMode) error { + return 
a.Mkdir(path, perm) +} + +func (a *AferoAdapter) Open(_ string) (afero.File, error) { + // See also GetHandle and ReadDir + return nil, errs.NotImplement +} + +func (a *AferoAdapter) OpenFile(_ string, _ int, _ os.FileMode) (afero.File, error) { + // See also GetHandle + return nil, errs.NotImplement +} + +func (a *AferoAdapter) Remove(name string) error { + return Remove(a.ctx, name) +} + +func (a *AferoAdapter) RemoveAll(path string) error { + return a.Remove(path) +} + +func (a *AferoAdapter) Rename(oldName, newName string) error { + return Rename(a.ctx, oldName, newName) +} + +func (a *AferoAdapter) Stat(name string) (os.FileInfo, error) { + return Stat(a.ctx, name) +} + +func (a *AferoAdapter) Name() string { + return "AList FTP Endpoint" +} + +func (a *AferoAdapter) Chmod(_ string, _ os.FileMode) error { + return errs.NotSupport +} + +func (a *AferoAdapter) Chown(_ string, _, _ int) error { + return errs.NotSupport +} + +func (a *AferoAdapter) Chtimes(_ string, _ time.Time, _ time.Time) error { + return errs.NotSupport +} + +func (a *AferoAdapter) ReadDir(name string) ([]os.FileInfo, error) { + return List(a.ctx, name) +} + +func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserver.FileTransfer, error) { + if offset != 0 { + return nil, errors.New("offset") + } + if (flags & os.O_APPEND) > 0 { + return nil, errors.New("append") + } + if (flags & os.O_WRONLY) > 0 { + return OpenUpload(a.ctx, name) + } + return OpenDownload(a.ctx, name) +} diff --git a/server/ftp/fsmanage.go b/server/ftp/fsmanage.go new file mode 100644 index 00000000..5199a473 --- /dev/null +++ b/server/ftp/fsmanage.go @@ -0,0 +1,75 @@ +package ftp + +import ( + "context" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/server/common" + "github.com/pkg/errors" + stdpath "path" +) + +func Mkdir(ctx 
context.Context, path string) error { + user := ctx.Value("user").(*model.User) + reqPath, err := user.JoinPath(path) + if err != nil { + return err + } + if !user.CanWrite() || !user.CanFTPManage() { + meta, err := op.GetNearestMeta(stdpath.Dir(reqPath)) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + return err + } + } + if !common.CanWrite(meta, reqPath) { + return errs.PermissionDenied + } + } + return fs.MakeDir(ctx, reqPath) +} + +func Remove(ctx context.Context, path string) error { + user := ctx.Value("user").(*model.User) + if !user.CanRemove() || !user.CanFTPManage() { + return errs.PermissionDenied + } + reqPath, err := user.JoinPath(path) + if err != nil { + return err + } + return fs.Remove(ctx, reqPath) +} + +func Rename(ctx context.Context, oldPath, newPath string) error { + user := ctx.Value("user").(*model.User) + srcPath, err := user.JoinPath(oldPath) + if err != nil { + return err + } + dstPath, err := user.JoinPath(newPath) + if err != nil { + return err + } + srcDir, srcBase := stdpath.Split(srcPath) + dstDir, dstBase := stdpath.Split(dstPath) + if srcDir == dstDir { + if !user.CanRename() || !user.CanFTPManage() { + return errs.PermissionDenied + } + return fs.Rename(ctx, srcPath, dstBase) + } else { + if !user.CanFTPManage() || !user.CanMove() || (srcBase != dstBase && !user.CanRename()) { + return errs.PermissionDenied + } + if err := fs.Move(ctx, srcPath, dstDir); err != nil { + return err + } + if srcBase != dstBase { + return fs.Rename(ctx, stdpath.Join(dstDir, srcBase), dstBase) + } + return nil + } +} diff --git a/server/ftp/fsread.go b/server/ftp/fsread.go new file mode 100644 index 00000000..6a9ba2eb --- /dev/null +++ b/server/ftp/fsread.go @@ -0,0 +1,188 @@ +package ftp + +import ( + "context" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" + 
"github.com/alist-org/alist/v3/internal/net" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/http_range" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/server/common" + "github.com/pkg/errors" + "io" + fs2 "io/fs" + "net/http" + "os" + "time" +) + +type FileDownloadProxy struct { + ftpserver.FileTransfer + reader io.ReadCloser + closers *utils.Closers +} + +func OpenDownload(ctx context.Context, path string) (*FileDownloadProxy, error) { + user := ctx.Value("user").(*model.User) + reqPath, err := user.JoinPath(path) + if err != nil { + return nil, err + } + meta, err := op.GetNearestMeta(reqPath) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + return nil, err + } + } + ctx = context.WithValue(ctx, "meta", meta) + if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) { + return nil, errs.PermissionDenied + } + + // directly use proxy + header := *(ctx.Value("proxy_header").(*http.Header)) + link, obj, err := fs.Link(ctx, reqPath, model.LinkArgs{ + IP: ctx.Value("client_ip").(string), + Header: header, + }) + if err != nil { + return nil, err + } + storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{}) + if err != nil { + return nil, err + } + if storage.GetStorage().ProxyRange { + common.ProxyRange(link, obj.GetSize()) + } + reader, closers, err := proxy(link) + if err != nil { + return nil, err + } + return &FileDownloadProxy{reader: reader, closers: closers}, nil +} + +func proxy(link *model.Link) (io.ReadCloser, *utils.Closers, error) { + if link.MFile != nil { + return link.MFile, nil, nil + } else if link.RangeReadCloser != nil { + rc, err := link.RangeReadCloser.RangeRead(context.Background(), http_range.Range{Length: -1}) + if err != nil { + return nil, nil, err + } + closers := link.RangeReadCloser.GetClosers() + return rc, &closers, nil + } else { + res, err := net.RequestHttp(context.Background(), http.MethodGet, link.Header, link.URL) + 
if err != nil { + return nil, nil, err + } + return res.Body, nil, nil + } +} + +func (f *FileDownloadProxy) Read(p []byte) (n int, err error) { + return f.reader.Read(p) +} + +func (f *FileDownloadProxy) Write(p []byte) (n int, err error) { + return 0, errs.NotSupport +} + +func (f *FileDownloadProxy) Seek(offset int64, whence int) (int64, error) { + return 0, errs.NotSupport +} + +func (f *FileDownloadProxy) Close() error { + defer func() { + if f.closers != nil { + _ = f.closers.Close() + } + }() + return f.reader.Close() +} + +type OsFileInfoAdapter struct { + obj model.Obj +} + +func (o *OsFileInfoAdapter) Name() string { + return o.obj.GetName() +} + +func (o *OsFileInfoAdapter) Size() int64 { + return o.obj.GetSize() +} + +func (o *OsFileInfoAdapter) Mode() fs2.FileMode { + var mode fs2.FileMode = 0755 + if o.IsDir() { + mode |= fs2.ModeDir + } + return mode +} + +func (o *OsFileInfoAdapter) ModTime() time.Time { + return o.obj.ModTime() +} + +func (o *OsFileInfoAdapter) IsDir() bool { + return o.obj.IsDir() +} + +func (o *OsFileInfoAdapter) Sys() any { + return o.obj +} + +func Stat(ctx context.Context, path string) (os.FileInfo, error) { + user := ctx.Value("user").(*model.User) + reqPath, err := user.JoinPath(path) + if err != nil { + return nil, err + } + meta, err := op.GetNearestMeta(reqPath) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + return nil, err + } + } + ctx = context.WithValue(ctx, "meta", meta) + if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) { + return nil, errs.PermissionDenied + } + obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{}) + if err != nil { + return nil, err + } + return &OsFileInfoAdapter{obj: obj}, nil +} + +func List(ctx context.Context, path string) ([]os.FileInfo, error) { + user := ctx.Value("user").(*model.User) + reqPath, err := user.JoinPath(path) + if err != nil { + return nil, err + } + meta, err := op.GetNearestMeta(reqPath) + if err != nil { + if 
!errors.Is(errors.Cause(err), errs.MetaNotFound) { + return nil, err + } + } + ctx = context.WithValue(ctx, "meta", meta) + if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) { + return nil, errs.PermissionDenied + } + objs, err := fs.List(ctx, reqPath, &fs.ListArgs{}) + if err != nil { + return nil, err + } + ret := make([]os.FileInfo, len(objs)) + for i, obj := range objs { + ret[i] = &OsFileInfoAdapter{obj: obj} + } + return ret, nil +} diff --git a/server/ftp/fsup.go b/server/ftp/fsup.go new file mode 100644 index 00000000..3042a3d2 --- /dev/null +++ b/server/ftp/fsup.go @@ -0,0 +1,91 @@ +package ftp + +import ( + "context" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/server/common" + "github.com/pkg/errors" + "io" + "net/http" + "os" + stdpath "path" + "time" +) + +type FileUploadProxy struct { + ftpserver.FileTransfer + buffer *os.File + path string + ctx context.Context +} + +func OpenUpload(ctx context.Context, path string) (*FileUploadProxy, error) { + user := ctx.Value("user").(*model.User) + path, err := user.JoinPath(path) + if err != nil { + return nil, err + } + meta, err := op.GetNearestMeta(stdpath.Dir(path)) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + return nil, err + } + } + if !(common.CanAccess(user, meta, path, ctx.Value("meta_pass").(string)) && + ((user.CanFTPManage() && user.CanWrite()) || common.CanWrite(meta, stdpath.Dir(path)))) { + return nil, errs.PermissionDenied + } + tmpFile, err := os.CreateTemp(conf.Conf.TempDir, "file-*") + if err != nil { + return nil, err + } + return &FileUploadProxy{buffer: tmpFile, path: path, ctx: ctx}, nil +} + +func (f 
*FileUploadProxy) Read(p []byte) (n int, err error) { + return 0, errs.NotSupport +} + +func (f *FileUploadProxy) Write(p []byte) (n int, err error) { + return f.buffer.Write(p) +} + +func (f *FileUploadProxy) Seek(offset int64, whence int) (int64, error) { + return 0, errs.NotSupport +} + +func (f *FileUploadProxy) Close() error { + dir, name := stdpath.Split(f.path) + size, err := f.buffer.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + if _, err := f.buffer.Seek(0, io.SeekStart); err != nil { + return err + } + arr := make([]byte, 512) + if _, err := f.buffer.Read(arr); err != nil { + return err + } + contentType := http.DetectContentType(arr) + if _, err := f.buffer.Seek(0, io.SeekStart); err != nil { + return err + } + s := &stream.FileStream{ + Obj: &model.Object{ + Name: name, + Size: size, + Modified: time.Now(), + }, + Mimetype: contentType, + WebPutAsTask: false, + } + s.SetTmpFile(f.buffer) + return fs.PutDirectly(f.ctx, dir, s, true) +} From ecefa5e0eb61bff2df66cfcbdbf97a14c73cb19f Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Tue, 10 Dec 2024 20:21:51 +0800 Subject: [PATCH 047/187] ci: fix desktop beta release trigger --- .github/workflows/beta_release.yml | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/.github/workflows/beta_release.yml b/.github/workflows/beta_release.yml index 90c2836f..c9cb7475 100644 --- a/.github/workflows/beta_release.yml +++ b/.github/workflows/beta_release.yml @@ -111,14 +111,23 @@ jobs: name: Beta Release Desktop runs-on: ubuntu-latest steps: - - uses: peter-evans/create-or-update-comment@v4 + - name: Checkout repo + uses: actions/checkout@v4 with: - issue-number: 69 - body: | - /release-beta - - triggered by @${{ github.actor }} - - commit sha: ${{ github.sha }} - - view files: https://github.com/alist-org/alist/tree/${{ github.sha }} - reactions: 'rocket' - token: ${{ secrets.MY_TOKEN }} + repository: alist-org/desktop-release + ref: main + persist-credentials: 
false + fetch-depth: 0 + + - name: Commit + run: | + git config --local user.email "bot@nn.ci" + git config --local user.name "IlaBot" + git commit --allow-empty -m "Trigger build for ${{ github.sha }}" + + - name: Push commit + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.MY_TOKEN }} + branch: main repository: alist-org/desktop-release \ No newline at end of file From 201e25c17fa00e5b8ea1989eebc63a0c7efdefbc Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Thu, 12 Dec 2024 20:50:00 +0800 Subject: [PATCH 048/187] fix(ftp-server): large transfer leads to client timeout (#7639) * fix(ftp-server): client timeout to wait a large file upload to netdisk * fix(ftp-server): driver alist v3 upload failed and temp files do not be deleted --- server/ftp/fsup.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/ftp/fsup.go b/server/ftp/fsup.go index 3042a3d2..35652271 100644 --- a/server/ftp/fsup.go +++ b/server/ftp/fsup.go @@ -87,5 +87,7 @@ func (f *FileUploadProxy) Close() error { WebPutAsTask: false, } s.SetTmpFile(f.buffer) - return fs.PutDirectly(f.ctx, dir, s, true) + s.Closers.Add(f.buffer) + _, err = fs.PutAsTask(f.ctx, dir, s) + return err } From 33ba7f152198c11a1ab0bb2eed192e637c835d02 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Thu, 12 Dec 2024 20:51:43 +0800 Subject: [PATCH 049/187] feat: sftp server support (#7643) * feat: sftp server support * fix(sftp-server): try fix build failed * fix: sftp download lack --- cmd/common.go | 1 + cmd/server.go | 28 +++++++++ go.mod | 12 ++-- go.sum | 27 ++++---- internal/bootstrap/ssh.go | 101 ++++++++++++++++++++++++++++++ internal/conf/config.go | 10 +++ internal/conf/var.go | 3 + server/ftp.go | 7 ++- server/ftp/afero.go | 36 +++++++++-- server/ftp/const.go | 11 ++++ server/ftp/fsup.go | 128 ++++++++++++++++++++++++++++++++++++-- server/ftp/sftp.go | 122 ++++++++++++++++++++++++++++++++++++ server/ftp/site.go | 21 +++++++ 
server/sftp.go | 109 ++++++++++++++++++++++++++++++++ 14 files changed, 584 insertions(+), 32 deletions(-) create mode 100644 internal/bootstrap/ssh.go create mode 100644 server/ftp/const.go create mode 100644 server/ftp/sftp.go create mode 100644 server/ftp/site.go create mode 100644 server/sftp.go diff --git a/cmd/common.go b/cmd/common.go index b4a7081c..fabc3a90 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -15,6 +15,7 @@ import ( func Init() { bootstrap.InitConfig() bootstrap.Log() + bootstrap.InitHostKey() bootstrap.InitDB() data.InitData() bootstrap.InitIndex() diff --git a/cmd/server.go b/cmd/server.go index 66b57952..3112a6a9 100644 --- a/cmd/server.go +++ b/cmd/server.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "github.com/KirCute/sftpd-alist" "net" "net/http" "os" @@ -131,6 +132,24 @@ the address is defined in config file`, }() } } + var sftpDriver *server.SftpDriver + var sftpServer *sftpd.SftpServer + if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable { + var err error + sftpDriver, err = server.NewSftpDriver() + if err != nil { + utils.Log.Fatalf("failed to start sftp driver: %s", err.Error()) + } else { + utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen) + go func() { + sftpServer = sftpd.NewSftpServer(sftpDriver) + err = sftpServer.RunServer() + if err != nil { + utils.Log.Fatalf("problem sftp server listening: %s", err.Error()) + } + }() + } + } // Wait for interrupt signal to gracefully shutdown the server with // a timeout of 1 second. 
quit := make(chan os.Signal, 1) @@ -181,6 +200,15 @@ the address is defined in config file`, } }() } + if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable && sftpServer != nil && sftpDriver != nil { + wg.Add(1) + go func() { + defer wg.Done() + if err := sftpServer.Close(); err != nil { + utils.Log.Fatal("SFTP server shutdown err: ", err) + } + }() + } wg.Wait() utils.Log.Println("Server exit") }, diff --git a/go.mod b/go.mod index 259521e9..1deaa1d5 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,8 @@ module github.com/alist-org/alist/v3 go 1.22.4 require ( + github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 + github.com/KirCute/sftpd-alist v0.0.11 github.com/SheltonZhu/115driver v1.0.32 github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4 @@ -60,7 +62,7 @@ require ( github.com/xhofe/tache v0.1.3 github.com/xhofe/wopan-sdk-go v0.1.3 github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22 - golang.org/x/crypto v0.27.0 + golang.org/x/crypto v0.30.0 golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e golang.org/x/image v0.19.0 golang.org/x/net v0.28.0 @@ -76,7 +78,6 @@ require ( require ( github.com/BurntSushi/toml v0.3.1 // indirect - github.com/KirCute/ftpserverlib-pasvportmap v0.0.0-20241208190057-c9a7bf2571e2 // indirect github.com/blevesearch/go-faiss v1.0.20 // indirect github.com/blevesearch/zapx/v16 v16.1.5 // indirect github.com/bytedance/sonic/loader v0.1.1 // indirect @@ -90,6 +91,7 @@ require ( github.com/hekmon/cunits/v2 v2.1.0 // indirect github.com/ipfs/boxo v0.12.0 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect ) require ( @@ -223,10 +225,10 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect go.etcd.io/bbolt v1.3.8 // indirect golang.org/x/arch v0.8.0 // indirect - golang.org/x/sync v0.8.0 // indirect + golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // 
indirect - golang.org/x/term v0.24.0 // indirect - golang.org/x/text v0.18.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/tools v0.24.0 // indirect google.golang.org/api v0.169.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect diff --git a/go.sum b/go.sum index dcad05c9..a4e8e12d 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,10 @@ cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2Qx cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/KirCute/ftpserverlib-pasvportmap v0.0.0-20241208190057-c9a7bf2571e2 h1:P3MoQ1kDfbCjL6+MPd5K7wPdKB4nqMuLU6Mv0+tdWDA= -github.com/KirCute/ftpserverlib-pasvportmap v0.0.0-20241208190057-c9a7bf2571e2/go.mod h1:v0NgMtKDDi/6CM6r4P+daCljCW3eO9yS+Z+pZDTKo1E= +github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 h1:ikwCzeqoqN6wvBHOB9OI6dde/jbV7EoTMpUcxtYl5Po= +github.com/KirCute/ftpserverlib-pasvportmap v1.25.0/go.mod h1:v0NgMtKDDi/6CM6r4P+daCljCW3eO9yS+Z+pZDTKo1E= +github.com/KirCute/sftpd-alist v0.0.11 h1:BGInXmmLBI+v6S9WZCwvY0DRK1vDprGNcTv/57p2GSo= +github.com/KirCute/sftpd-alist v0.0.11/go.mod h1:pPFzr6GrKqXvFXLr46ZpoqmtSpwH8DKTYloSp/ybzKQ= github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE= github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc= github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM= @@ -492,12 +494,13 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify 
v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 h1:Jtcrb09q0AVWe3BGe8qtuuGxNSHWGkTWr43kHTJ+CpA= github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA= +github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 h1:6Y51mutOvRGRx6KqyMNo//xk8B8o6zW9/RVmy1VamOs= +github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543/go.mod h1:jpwqYA8KUVEvSUJHkCXsnBRJCSKP1BMa81QZ6kvRpow= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4= github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0= @@ -571,8 +574,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e 
h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -614,8 +617,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -647,8 +650,6 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= 
@@ -661,8 +662,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -676,8 +677,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= diff --git a/internal/bootstrap/ssh.go b/internal/bootstrap/ssh.go new file mode 100644 index 00000000..ec4a07ac --- /dev/null +++ 
b/internal/bootstrap/ssh.go @@ -0,0 +1,101 @@ +package bootstrap + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "github.com/alist-org/alist/v3/cmd/flags" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/pkg/utils" + "golang.org/x/crypto/ssh" + "os" + "path/filepath" +) + +func InitHostKey() { + sshPath := filepath.Join(flags.DataDir, "ssh") + if !utils.Exists(sshPath) { + err := utils.CreateNestedDirectory(sshPath) + if err != nil { + utils.Log.Fatalf("failed to create ssh directory: %+v", err) + return + } + } + conf.SSHSigners = make([]ssh.Signer, 0, 4) + if rsaKey, ok := LoadOrGenerateRSAHostKey(sshPath); ok { + conf.SSHSigners = append(conf.SSHSigners, rsaKey) + } + // TODO Add keys for other encryption algorithms +} + +func LoadOrGenerateRSAHostKey(parentDir string) (ssh.Signer, bool) { + privateKeyPath := filepath.Join(parentDir, "ssh_host_rsa_key") + publicKeyPath := filepath.Join(parentDir, "ssh_host_rsa_key.pub") + privateKeyBytes, err := os.ReadFile(privateKeyPath) + if err == nil { + var privateKey *rsa.PrivateKey + privateKey, err = rsaDecodePrivateKey(privateKeyBytes) + if err == nil { + var ret ssh.Signer + ret, err = ssh.NewSignerFromKey(privateKey) + if err == nil { + return ret, true + } + } + } + _ = os.Remove(privateKeyPath) + _ = os.Remove(publicKeyPath) + privateKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + utils.Log.Fatalf("failed to generate RSA private key: %+v", err) + return nil, false + } + publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + utils.Log.Fatalf("failed to generate RSA public key: %+v", err) + return nil, false + } + ret, err := ssh.NewSignerFromKey(privateKey) + if err != nil { + utils.Log.Fatalf("failed to generate RSA signer: %+v", err) + return nil, false + } + privateBytes := rsaEncodePrivateKey(privateKey) + publicBytes := ssh.MarshalAuthorizedKey(publicKey) + err = os.WriteFile(privateKeyPath, 
privateBytes, 0600) + if err != nil { + utils.Log.Fatalf("failed to write RSA private key to file: %+v", err) + return nil, false + } + err = os.WriteFile(publicKeyPath, publicBytes, 0644) + if err != nil { + _ = os.Remove(privateKeyPath) + utils.Log.Fatalf("failed to write RSA public key to file: %+v", err) + return nil, false + } + return ret, true +} + +func rsaEncodePrivateKey(privateKey *rsa.PrivateKey) []byte { + privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey) + privateBlock := &pem.Block{ + Type: "RSA PRIVATE KEY", + Headers: nil, + Bytes: privateKeyBytes, + } + return pem.EncodeToMemory(privateBlock) +} + +func rsaDecodePrivateKey(bytes []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(bytes) + if block == nil { + return nil, fmt.Errorf("failed to parse PEM block containing the key") + } + privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + return privateKey, nil +} diff --git a/internal/conf/config.go b/internal/conf/config.go index df6c0544..6c0ccb2a 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -84,6 +84,11 @@ type FTP struct { EnablePasvConnIPCheck bool `json:"enable_pasv_conn_ip_check" env:"ENABLE_PASV_CONN_IP_CHECK"` } +type SFTP struct { + Enable bool `json:"enable" env:"ENABLE"` + Listen string `json:"listen" env:"LISTEN"` +} + type Config struct { Force bool `json:"force" env:"FORCE"` SiteURL string `json:"site_url" env:"SITE_URL"` @@ -104,6 +109,7 @@ type Config struct { Cors Cors `json:"cors" envPrefix:"CORS_"` S3 S3 `json:"s3" envPrefix:"S3_"` FTP FTP `json:"ftp" envPrefix:"FTP_"` + SFTP SFTP `json:"sftp" envPrefix:"SFTP_"` } func DefaultConfig() *Config { @@ -185,5 +191,9 @@ func DefaultConfig() *Config { EnableActiveConnIPCheck: true, EnablePasvConnIPCheck: true, }, + SFTP: SFTP{ + Enable: true, + Listen: ":5222", + }, } } diff --git a/internal/conf/var.go b/internal/conf/var.go index 0a8eb16f..b7277e41 100644 --- a/internal/conf/var.go +++ 
b/internal/conf/var.go @@ -1,6 +1,7 @@ package conf import ( + "golang.org/x/crypto/ssh" "net/url" "regexp" ) @@ -32,3 +33,5 @@ var ( ManageHtml string IndexHtml string ) + +var SSHSigners []ssh.Signer diff --git a/server/ftp.go b/server/ftp.go index 161ea63c..4d507b68 100644 --- a/server/ftp.go +++ b/server/ftp.go @@ -70,7 +70,7 @@ func NewMainDriver() (*FtpMainDriver, error) { Banner: setting.GetStr(conf.Announcement), TLSRequired: tlsRequired, DisableLISTArgs: false, - DisableSite: true, + DisableSite: false, DisableActiveMode: conf.Conf.FTP.DisableActiveMode, EnableHASH: false, DisableSTAT: false, @@ -79,6 +79,9 @@ func NewMainDriver() (*FtpMainDriver, error) { DefaultTransferType: transferType, ActiveConnectionsCheck: activeConnCheck, PasvConnectionsCheck: pasvConnCheck, + SiteHandlers: map[string]ftpserver.SiteHandler{ + "SIZE": ftp.HandleSIZE, + }, }, proxyHeader: header, clients: make(map[uint32]ftpserver.ClientContext), @@ -128,7 +131,7 @@ func (d *FtpMainDriver) AuthUser(cc ftpserver.ClientContext, user, pass string) } } if userObj.Disabled || !userObj.CanFTPAccess() { - return nil, errors.New("user not allowed to access FTP") + return nil, errors.New("user is not allowed to access via FTP") } ctx := context.Background() diff --git a/server/ftp/afero.go b/server/ftp/afero.go index 6eb4bf8e..866ad8c0 100644 --- a/server/ftp/afero.go +++ b/server/ftp/afero.go @@ -5,13 +5,15 @@ import ( "errors" ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" "github.com/spf13/afero" "os" "time" ) type AferoAdapter struct { - ctx context.Context + ctx context.Context + nextFileSize int64 } func NewAferoAdapter(ctx context.Context) *AferoAdapter { @@ -78,14 +80,36 @@ func (a *AferoAdapter) ReadDir(name string) ([]os.FileInfo, error) { } func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserver.FileTransfer, error) { + fileSize := a.nextFileSize + 
a.nextFileSize = 0 if offset != 0 { - return nil, errors.New("offset") + return nil, errs.NotSupport } - if (flags & os.O_APPEND) > 0 { - return nil, errors.New("append") + if (flags & os.O_SYNC) != 0 { + return nil, errs.NotSupport } - if (flags & os.O_WRONLY) > 0 { - return OpenUpload(a.ctx, name) + if (flags & os.O_APPEND) != 0 { + return nil, errs.NotSupport + } + _, err := fs.Get(a.ctx, name, &fs.GetArgs{}) + exists := err == nil + if (flags&os.O_CREATE) == 0 && !exists { + return nil, errs.ObjectNotFound + } + if (flags&os.O_EXCL) != 0 && exists { + return nil, errors.New("file already exists") + } + if (flags & os.O_WRONLY) != 0 { + trunc := (flags & os.O_TRUNC) != 0 + if fileSize > 0 { + return OpenUploadWithLength(a.ctx, name, trunc, fileSize) + } else { + return OpenUpload(a.ctx, name, trunc) + } } return OpenDownload(a.ctx, name) } + +func (a *AferoAdapter) SetNextFileSize(size int64) { + a.nextFileSize = size +} diff --git a/server/ftp/const.go b/server/ftp/const.go new file mode 100644 index 00000000..1fd14e82 --- /dev/null +++ b/server/ftp/const.go @@ -0,0 +1,11 @@ +package ftp + +// From leffss/sftpd +const ( + SSH_FXF_READ = 0x00000001 + SSH_FXF_WRITE = 0x00000002 + SSH_FXF_APPEND = 0x00000004 + SSH_FXF_CREAT = 0x00000008 + SSH_FXF_TRUNC = 0x00000010 + SSH_FXF_EXCL = 0x00000020 +) diff --git a/server/ftp/fsup.go b/server/ftp/fsup.go index 35652271..f18c13c2 100644 --- a/server/ftp/fsup.go +++ b/server/ftp/fsup.go @@ -1,6 +1,7 @@ package ftp import ( + "bytes" "context" ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" "github.com/alist-org/alist/v3/internal/conf" @@ -23,29 +24,38 @@ type FileUploadProxy struct { buffer *os.File path string ctx context.Context + trunc bool } -func OpenUpload(ctx context.Context, path string) (*FileUploadProxy, error) { +func uploadAuth(ctx context.Context, path string) error { user := ctx.Value("user").(*model.User) path, err := user.JoinPath(path) if err != nil { - return nil, err + return err } meta, err := 
op.GetNearestMeta(stdpath.Dir(path)) if err != nil { if !errors.Is(errors.Cause(err), errs.MetaNotFound) { - return nil, err + return err } } if !(common.CanAccess(user, meta, path, ctx.Value("meta_pass").(string)) && ((user.CanFTPManage() && user.CanWrite()) || common.CanWrite(meta, stdpath.Dir(path)))) { - return nil, errs.PermissionDenied + return errs.PermissionDenied + } + return nil +} + +func OpenUpload(ctx context.Context, path string, trunc bool) (*FileUploadProxy, error) { + err := uploadAuth(ctx, path) + if err != nil { + return nil, err } tmpFile, err := os.CreateTemp(conf.Conf.TempDir, "file-*") if err != nil { return nil, err } - return &FileUploadProxy{buffer: tmpFile, path: path, ctx: ctx}, nil + return &FileUploadProxy{buffer: tmpFile, path: path, ctx: ctx, trunc: trunc}, nil } func (f *FileUploadProxy) Read(p []byte) (n int, err error) { @@ -77,6 +87,9 @@ func (f *FileUploadProxy) Close() error { if _, err := f.buffer.Seek(0, io.SeekStart); err != nil { return err } + if f.trunc { + _ = fs.Remove(f.ctx, f.path) + } s := &stream.FileStream{ Obj: &model.Object{ Name: name, @@ -84,10 +97,113 @@ func (f *FileUploadProxy) Close() error { Modified: time.Now(), }, Mimetype: contentType, - WebPutAsTask: false, + WebPutAsTask: true, } s.SetTmpFile(f.buffer) s.Closers.Add(f.buffer) _, err = fs.PutAsTask(f.ctx, dir, s) return err } + +type FileUploadWithLengthProxy struct { + ftpserver.FileTransfer + ctx context.Context + path string + length int64 + first512Bytes [512]byte + pFirst int + pipeWriter io.WriteCloser + errChan chan error +} + +func OpenUploadWithLength(ctx context.Context, path string, trunc bool, length int64) (*FileUploadWithLengthProxy, error) { + err := uploadAuth(ctx, path) + if err != nil { + return nil, err + } + if trunc { + _ = fs.Remove(ctx, path) + } + return &FileUploadWithLengthProxy{ctx: ctx, path: path, length: length}, nil +} + +func (f *FileUploadWithLengthProxy) Read(p []byte) (n int, err error) { + return 0, errs.NotSupport 
+} + +func (f *FileUploadWithLengthProxy) Write(p []byte) (n int, err error) { + if f.pipeWriter != nil { + select { + case e := <-f.errChan: + return 0, e + default: + return f.pipeWriter.Write(p) + } + } else if len(p) < 512-f.pFirst { + copy(f.first512Bytes[f.pFirst:], p) + f.pFirst += len(p) + return len(p), nil + } else { + copy(f.first512Bytes[f.pFirst:], p[:512-f.pFirst]) + contentType := http.DetectContentType(f.first512Bytes[:]) + dir, name := stdpath.Split(f.path) + reader, writer := io.Pipe() + f.errChan = make(chan error, 1) + s := &stream.FileStream{ + Obj: &model.Object{ + Name: name, + Size: f.length, + Modified: time.Now(), + }, + Mimetype: contentType, + WebPutAsTask: false, + Reader: reader, + } + go func() { + e := fs.PutDirectly(f.ctx, dir, s, true) + f.errChan <- e + close(f.errChan) + }() + f.pipeWriter = writer + n, err = writer.Write(f.first512Bytes[:]) + if err != nil { + return n, err + } + n1, err := writer.Write(p[512-f.pFirst:]) + if err != nil { + return n1 + 512 - f.pFirst, err + } + f.pFirst = 512 + return len(p), nil + } +} + +func (f *FileUploadWithLengthProxy) Seek(offset int64, whence int) (int64, error) { + return 0, errs.NotSupport +} + +func (f *FileUploadWithLengthProxy) Close() error { + if f.pipeWriter != nil { + err := f.pipeWriter.Close() + if err != nil { + return err + } + err = <-f.errChan + return err + } else { + data := f.first512Bytes[:f.pFirst] + contentType := http.DetectContentType(data) + dir, name := stdpath.Split(f.path) + s := &stream.FileStream{ + Obj: &model.Object{ + Name: name, + Size: int64(f.pFirst), + Modified: time.Now(), + }, + Mimetype: contentType, + WebPutAsTask: false, + Reader: bytes.NewReader(data), + } + return fs.PutDirectly(f.ctx, dir, s, true) + } +} diff --git a/server/ftp/sftp.go b/server/ftp/sftp.go new file mode 100644 index 00000000..0a11ee18 --- /dev/null +++ b/server/ftp/sftp.go @@ -0,0 +1,122 @@ +package ftp + +import ( + "github.com/KirCute/sftpd-alist" + 
"github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + "os" +) + +type SftpDriverAdapter struct { + FtpDriver *AferoAdapter +} + +func (s *SftpDriverAdapter) OpenFile(_ string, _ uint32, _ *sftpd.Attr) (sftpd.File, error) { + // See also GetHandle + return nil, errs.NotImplement +} + +func (s *SftpDriverAdapter) OpenDir(_ string) (sftpd.Dir, error) { + // See also GetHandle + return nil, errs.NotImplement +} + +func (s *SftpDriverAdapter) Remove(name string) error { + return s.FtpDriver.Remove(name) +} + +func (s *SftpDriverAdapter) Rename(old, new string, _ uint32) error { + return s.FtpDriver.Rename(old, new) +} + +func (s *SftpDriverAdapter) Mkdir(name string, attr *sftpd.Attr) error { + return s.FtpDriver.Mkdir(name, attr.Mode) +} + +func (s *SftpDriverAdapter) Rmdir(name string) error { + return s.Remove(name) +} + +func (s *SftpDriverAdapter) Stat(name string, _ bool) (*sftpd.Attr, error) { + stat, err := s.FtpDriver.Stat(name) + if err != nil { + return nil, err + } + return fileInfoToSftpAttr(stat), nil +} + +func (s *SftpDriverAdapter) SetStat(_ string, _ *sftpd.Attr) error { + return errs.NotSupport +} + +func (s *SftpDriverAdapter) ReadLink(_ string) (string, error) { + return "", errs.NotSupport +} + +func (s *SftpDriverAdapter) CreateLink(_, _ string, _ uint32) error { + return errs.NotSupport +} + +func (s *SftpDriverAdapter) RealPath(path string) (string, error) { + return utils.FixAndCleanPath(path), nil +} + +func (s *SftpDriverAdapter) GetHandle(name string, flags uint32, _ *sftpd.Attr, offset uint64) (sftpd.FileTransfer, error) { + return s.FtpDriver.GetHandle(name, sftpFlagToOpenMode(flags), int64(offset)) +} + +func (s *SftpDriverAdapter) ReadDir(name string) ([]sftpd.NamedAttr, error) { + dir, err := s.FtpDriver.ReadDir(name) + if err != nil { + return nil, err + } + ret := make([]sftpd.NamedAttr, len(dir)) + for i, d := range dir { + ret[i] = 
*fileInfoToSftpNamedAttr(d) + } + return ret, nil +} + +// From leffss/sftpd +func sftpFlagToOpenMode(flags uint32) int { + mode := 0 + if (flags & SSH_FXF_READ) != 0 { + mode |= os.O_RDONLY + } + if (flags & SSH_FXF_WRITE) != 0 { + mode |= os.O_WRONLY + } + if (flags & SSH_FXF_APPEND) != 0 { + mode |= os.O_APPEND + } + if (flags & SSH_FXF_CREAT) != 0 { + mode |= os.O_CREATE + } + if (flags & SSH_FXF_TRUNC) != 0 { + mode |= os.O_TRUNC + } + if (flags & SSH_FXF_EXCL) != 0 { + mode |= os.O_EXCL + } + return mode +} + +func fileInfoToSftpAttr(stat os.FileInfo) *sftpd.Attr { + ret := &sftpd.Attr{} + ret.Flags |= sftpd.ATTR_SIZE + ret.Size = uint64(stat.Size()) + ret.Flags |= sftpd.ATTR_MODE + ret.Mode = stat.Mode() + ret.Flags |= sftpd.ATTR_TIME + ret.ATime = stat.Sys().(model.Obj).CreateTime() + ret.MTime = stat.ModTime() + return ret +} + +func fileInfoToSftpNamedAttr(stat os.FileInfo) *sftpd.NamedAttr { + return &sftpd.NamedAttr{ + Name: stat.Name(), + Attr: *fileInfoToSftpAttr(stat), + } +} diff --git a/server/ftp/site.go b/server/ftp/site.go new file mode 100644 index 00000000..8ea667d8 --- /dev/null +++ b/server/ftp/site.go @@ -0,0 +1,21 @@ +package ftp + +import ( + "fmt" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "strconv" +) + +func HandleSIZE(param string, client ftpserver.ClientDriver) (int, string) { + fs, ok := client.(*AferoAdapter) + if !ok { + return ftpserver.StatusNotLoggedIn, "Unexpected exception (driver is nil)" + } + size, err := strconv.ParseInt(param, 10, 64) + if err != nil { + return ftpserver.StatusSyntaxErrorParameters, fmt.Sprintf( + "Couldn't parse file size, given: %s, err: %v", param, err) + } + fs.SetNextFileSize(size) + return ftpserver.StatusOK, "Accepted next file size" +} diff --git a/server/sftp.go b/server/sftp.go new file mode 100644 index 00000000..3b07d472 --- /dev/null +++ b/server/sftp.go @@ -0,0 +1,109 @@ +package server + +import ( + "context" + "github.com/KirCute/sftpd-alist" + 
"github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/server/ftp" + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" + "net/http" +) + +type SftpDriver struct { + proxyHeader *http.Header + config *sftpd.Config +} + +func NewSftpDriver() (*SftpDriver, error) { + header := &http.Header{} + header.Add("User-Agent", setting.GetStr(conf.FTPProxyUserAgent)) + return &SftpDriver{ + proxyHeader: header, + }, nil +} + +func (d *SftpDriver) GetConfig() *sftpd.Config { + if d.config != nil { + return d.config + } + serverConfig := ssh.ServerConfig{ + NoClientAuth: true, + NoClientAuthCallback: d.NoClientAuth, + PasswordCallback: d.PasswordAuth, + AuthLogCallback: d.AuthLogCallback, + BannerCallback: d.GetBanner, + } + for _, k := range conf.SSHSigners { + serverConfig.AddHostKey(k) + } + d.config = &sftpd.Config{ + ServerConfig: serverConfig, + HostPort: conf.Conf.SFTP.Listen, + ErrorLogFunc: utils.Log.Error, + //DebugLogFunc: utils.Log.Debugf, + } + return d.config +} + +func (d *SftpDriver) GetFileSystem(sc *ssh.ServerConn) (sftpd.FileSystem, error) { + userObj, err := op.GetUserByName(sc.User()) + if err != nil { + return nil, err + } + ctx := context.Background() + ctx = context.WithValue(ctx, "user", userObj) + ctx = context.WithValue(ctx, "meta_pass", "") + ctx = context.WithValue(ctx, "client_ip", sc.RemoteAddr().String()) + ctx = context.WithValue(ctx, "proxy_header", d.proxyHeader) + return &ftp.SftpDriverAdapter{FtpDriver: ftp.NewAferoAdapter(ctx)}, nil +} + +func (d *SftpDriver) Close() { +} + +func (d *SftpDriver) NoClientAuth(conn ssh.ConnMetadata) (*ssh.Permissions, error) { + if conn.User() != "guest" { + return nil, errors.New("only guest is allowed to login without authorization") + } + guest, err := op.GetGuest() + if err != nil { + return 
nil, err + } + if guest.Disabled || !guest.CanFTPAccess() { + return nil, errors.New("user is not allowed to access via SFTP") + } + return nil, nil +} + +func (d *SftpDriver) PasswordAuth(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) { + userObj, err := op.GetUserByName(conn.User()) + if err != nil { + return nil, err + } + passHash := model.StaticHash(string(password)) + if err = userObj.ValidatePwdStaticHash(passHash); err != nil { + return nil, err + } + if userObj.Disabled || !userObj.CanFTPAccess() { + return nil, errors.New("user is not allowed to access via SFTP") + } + return nil, nil +} + +func (d *SftpDriver) AuthLogCallback(conn ssh.ConnMetadata, method string, err error) { + ip := conn.RemoteAddr().String() + if err == nil { + utils.Log.Infof("[SFTP] %s(%s) logged in via %s", conn.User(), ip, method) + } else if method != "none" { + utils.Log.Infof("[SFTP] %s(%s) tries logging in via %s but with error: %s", conn.User(), ip, method, err) + } +} + +func (d *SftpDriver) GetBanner(_ ssh.ConnMetadata) string { + return setting.GetStr(conf.Announcement) +} From cf58ab3a78aea2e97053fc3fbf8b0050f420604a Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Thu, 12 Dec 2024 21:04:14 +0800 Subject: [PATCH 050/187] chore(config): disable FTP and SFTP by default --- internal/conf/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/conf/config.go b/internal/conf/config.go index 6c0ccb2a..a9b38242 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -180,7 +180,7 @@ func DefaultConfig() *Config { SSL: false, }, FTP: FTP{ - Enable: true, + Enable: false, Listen: ":5221", FindPasvPortAttempts: 50, ActiveTransferPortNon20: false, @@ -192,7 +192,7 @@ func DefaultConfig() *Config { EnablePasvConnIPCheck: true, }, SFTP: SFTP{ - Enable: true, + Enable: false, Listen: ":5222", }, } From 331885ed64860c58b7556f7ac3d46a5eae875ce6 Mon Sep 17 00:00:00 2001 From: hshpy Date: Tue, 17 Dec 2024 22:04:27 +0800 Subject: 
[PATCH 051/187] fix(net): close of closed channel (#7580) --- internal/net/request.go | 5 +---- internal/net/serve.go | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/internal/net/request.go b/internal/net/request.go index c0f547ba..1a7405e4 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -169,9 +169,7 @@ func (d *downloader) sendChunkTask() *chunk { // when the final reader Close, we interrupt func (d *downloader) interrupt() error { - if d.chunkChannel == nil { - return nil - } + d.cancel() if d.written != d.params.Range.Length { log.Debugf("Downloader interrupt before finish") @@ -181,7 +179,6 @@ func (d *downloader) interrupt() error { } defer func() { close(d.chunkChannel) - d.chunkChannel = nil for _, buf := range d.bufs { buf.Close() } diff --git a/internal/net/serve.go b/internal/net/serve.go index 0eb8cbb8..e85f61a8 100644 --- a/internal/net/serve.go +++ b/internal/net/serve.go @@ -174,7 +174,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time pw.Close() }() } - defer sendContent.Close() + //defer sendContent.Close() w.Header().Set("Accept-Ranges", "bytes") if w.Header().Get("Content-Encoding") == "" { From b8bd14f99b3cccdddbd7f8d3b841655892ffa89c Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Tue, 17 Dec 2024 22:05:52 +0800 Subject: [PATCH 052/187] fix(lanzou): missing parameter (#7678 close #7210) --- drivers/lanzou/help.go | 4 ++-- drivers/lanzou/util.go | 22 ++++++++++++++++++++-- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/drivers/lanzou/help.go b/drivers/lanzou/help.go index 31a558e9..81d7c567 100644 --- a/drivers/lanzou/help.go +++ b/drivers/lanzou/help.go @@ -120,9 +120,9 @@ var findKVReg = regexp.MustCompile(`'(.+?)':('?([^' },]*)'?)`) // 拆分kv func findJSVarFunc(key, data string) string { var values []string if key != "sasign" { - values = regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data) + values = regexp.MustCompile(`var ` + 
key + `\s*=\s*['"]?(.+?)['"]?;`).FindStringSubmatch(data) } else { - matches := regexp.MustCompile(`var `+key+` = '(.+?)';`).FindAllStringSubmatch(data, -1) + matches := regexp.MustCompile(`var `+key+`\s*=\s*['"]?(.+?)['"]?;`).FindAllStringSubmatch(data, -1) if len(matches) == 3 { values = matches[1] } else { diff --git a/drivers/lanzou/util.go b/drivers/lanzou/util.go index abc2c400..4b9959ad 100644 --- a/drivers/lanzou/util.go +++ b/drivers/lanzou/util.go @@ -264,6 +264,9 @@ var findSubFolderReg = regexp.MustCompile(`(?i)(?:folderlink|mbxfolder).+href="/ // 获取下载页面链接 var findDownPageParamReg = regexp.MustCompile(` 1 { + fileID = fileIDs[1] + } else { + return nil, fmt.Errorf("not find file id") + } var resp FileShareInfoAndUrlResp[string] - _, err = d.post(d.ShareUrl+"/ajaxm.php", func(req *resty.Request) { req.SetFormData(param) }, &resp) + _, err = d.post(d.ShareUrl+"/ajaxm.php?file="+fileID, func(req *resty.Request) { req.SetFormData(param) }, &resp) if err != nil { return nil, err } @@ -381,8 +392,15 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) ( return nil, err } + fileIDs := findFileIDReg.FindStringSubmatch(nextPageData) + var fileID string + if len(fileIDs) > 1 { + fileID = fileIDs[1] + } else { + return nil, fmt.Errorf("not find file id") + } var resp FileShareInfoAndUrlResp[int] - _, err = d.post(d.ShareUrl+"/ajaxm.php", func(req *resty.Request) { req.SetFormData(param) }, &resp) + _, err = d.post(d.ShareUrl+"/ajaxm.php?file="+fileID, func(req *resty.Request) { req.SetFormData(param) }, &resp) if err != nil { return nil, err } From db9922412611f6d546f723586ec67ce587666478 Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Wed, 25 Dec 2024 21:08:22 +0800 Subject: [PATCH 053/187] =?UTF-8?q?perf:=20Speed=20=E2=80=8B=E2=80=8Bof=20?= =?UTF-8?q?database=20initialization=20(#7694)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * perf: 
优化非sqlite3数据库时初始化慢的问题 * refactor --- internal/bootstrap/data/setting.go | 48 ++++++++++++++++++------------ internal/bootstrap/db.go | 18 +++++------ internal/op/setting.go | 8 ++--- server/common/base.go | 8 ++--- 4 files changed, 46 insertions(+), 36 deletions(-) diff --git a/internal/bootstrap/data/setting.go b/internal/bootstrap/data/setting.go index 206273b4..bcb64f79 100644 --- a/internal/bootstrap/data/setting.go +++ b/internal/bootstrap/data/setting.go @@ -3,6 +3,7 @@ package data import ( "github.com/alist-org/alist/v3/cmd/flags" "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/db" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/offline_download/tool" "github.com/alist-org/alist/v3/internal/op" @@ -21,17 +22,19 @@ func initSettings() { if err != nil { utils.Log.Fatalf("failed get settings: %+v", err) } - for i := range settings { - if !isActive(settings[i].Key) && settings[i].Flag != model.DEPRECATED { - settings[i].Flag = model.DEPRECATED - err = op.SaveSettingItem(&settings[i]) + settingMap := map[string]*model.SettingItem{} + for _, v := range settings { + if !isActive(v.Key) && v.Flag != model.DEPRECATED { + v.Flag = model.DEPRECATED + err = op.SaveSettingItem(&v) if err != nil { utils.Log.Fatalf("failed save setting: %+v", err) } } + settingMap[v.Key] = &v } - // create or save setting + save := false for i := range initialSettingItems { item := &initialSettingItems[i] item.Index = uint(i) @@ -39,26 +42,33 @@ func initSettings() { item.PreDefault = item.Value } // err - stored, err := op.GetSettingItemByKey(item.Key) - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - utils.Log.Fatalf("failed get setting: %+v", err) - continue + stored, ok := settingMap[item.Key] + if !ok { + stored, err = op.GetSettingItemByKey(item.Key) + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + utils.Log.Fatalf("failed get setting: %+v", err) + continue + } } - // save if 
stored != nil && item.Key != conf.VERSION && stored.Value != item.PreDefault { item.Value = stored.Value } + _, err = op.HandleSettingItemHook(item) + if err != nil { + utils.Log.Errorf("failed to execute hook on %s: %+v", item.Key, err) + continue + } + // save if stored == nil || *item != *stored { - err = op.SaveSettingItem(item) - if err != nil { - utils.Log.Fatalf("failed save setting: %+v", err) - } + save = true + } + } + if save { + err = db.SaveSettingItems(initialSettingItems) + if err != nil { + utils.Log.Fatalf("failed save setting: %+v", err) } else { - // Not save so needs to execute hook - _, err = op.HandleSettingItemHook(item) - if err != nil { - utils.Log.Errorf("failed to execute hook on %s: %+v", item.Key, err) - } + op.SettingCacheUpdate() } } } diff --git a/internal/bootstrap/db.go b/internal/bootstrap/db.go index 5dfa2820..39b659b7 100644 --- a/internal/bootstrap/db.go +++ b/internal/bootstrap/db.go @@ -56,20 +56,20 @@ func InitDB() { } case "mysql": { - //[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] - dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local&tls=%s", - database.User, database.Password, database.Host, database.Port, database.Name, database.SSLMode) - if database.DSN != "" { - dsn = database.DSN + dsn := database.DSN + if dsn == "" { + //[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] + dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local&tls=%s", + database.User, database.Password, database.Host, database.Port, database.Name, database.SSLMode) } dB, err = gorm.Open(mysql.Open(dsn), gormConfig) } case "postgres": { - dsn := fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai", - database.Host, database.User, database.Password, database.Name, database.Port, database.SSLMode) - if database.DSN != "" { - dsn = database.DSN + dsn := database.DSN + if dsn == "" { + dsn = 
fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai", + database.Host, database.User, database.Password, database.Name, database.Port, database.SSLMode) } dB, err = gorm.Open(postgres.Open(dsn), gormConfig) } diff --git a/internal/op/setting.go b/internal/op/setting.go index 83d19c12..50eba3f7 100644 --- a/internal/op/setting.go +++ b/internal/op/setting.go @@ -26,7 +26,7 @@ var settingGroupCacheF = func(key string, item []model.SettingItem) { settingGroupCache.Set(key, item, cache.WithEx[[]model.SettingItem](time.Hour)) } -func settingCacheUpdate() { +func SettingCacheUpdate() { settingCache.Clear() settingGroupCache.Clear() } @@ -167,7 +167,7 @@ func SaveSettingItems(items []model.SettingItem) error { } } if len(errs) < len(items)-len(noHookItems)+1 { - settingCacheUpdate() + SettingCacheUpdate() } return utils.MergeErrors(errs...) } @@ -181,7 +181,7 @@ func SaveSettingItem(item *model.SettingItem) (err error) { if err = db.SaveSettingItem(item); err != nil { return err } - settingCacheUpdate() + SettingCacheUpdate() return nil } @@ -193,6 +193,6 @@ func DeleteSettingItemByKey(key string) error { if !old.IsDeprecated() { return errors.Errorf("setting [%s] is not deprecated", key) } - settingCacheUpdate() + SettingCacheUpdate() return db.DeleteSettingItemByKey(key) } diff --git a/server/common/base.go b/server/common/base.go index eb6ef2b8..11a28d25 100644 --- a/server/common/base.go +++ b/server/common/base.go @@ -12,16 +12,16 @@ import ( func GetApiUrl(r *http.Request) string { api := conf.Conf.SiteURL if strings.HasPrefix(api, "http") { - return api + return strings.TrimSuffix(api, "/") } if r != nil { protocol := "http" if r.TLS != nil || r.Header.Get("X-Forwarded-Proto") == "https" { protocol = "https" } - host := r.Host - if r.Header.Get("X-Forwarded-Host") != "" { - host = r.Header.Get("X-Forwarded-Host") + host := r.Header.Get("X-Forwarded-Host") + if host == "" { + host = r.Host } api = fmt.Sprintf("%s://%s", 
protocol, stdpath.Join(host, api)) } From d7aa1608ac2f3834af87b4a81b8f0970b3dadbc0 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Wed, 25 Dec 2024 21:09:54 +0800 Subject: [PATCH 054/187] feat(task): add speed monitor (#7655) --- internal/fs/copy.go | 17 ++++--- internal/fs/fs.go | 4 +- internal/fs/put.go | 11 ++-- internal/offline_download/115/client.go | 1 + internal/offline_download/aria2/aria2.go | 7 +-- internal/offline_download/http/client.go | 1 + internal/offline_download/pikpak/pikpak.go | 5 ++ internal/offline_download/qbit/qbit.go | 1 + internal/offline_download/tool/add.go | 4 +- internal/offline_download/tool/base.go | 11 ++-- internal/offline_download/tool/download.go | 16 ++++-- internal/offline_download/tool/transfer.go | 6 ++- .../offline_download/transmission/client.go | 1 + internal/task/base.go | 50 ++++++++++++++++--- server/handles/fsmanage.go | 2 +- server/handles/fsup.go | 4 +- server/handles/offline_download.go | 2 +- server/handles/task.go | 17 +++++-- 18 files changed, 116 insertions(+), 44 deletions(-) diff --git a/internal/fs/copy.go b/internal/fs/copy.go index d4ad452b..c3fadaab 100644 --- a/internal/fs/copy.go +++ b/internal/fs/copy.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" stdpath "path" + "time" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/driver" @@ -18,7 +19,7 @@ import ( ) type CopyTask struct { - task.TaskWithCreator + task.TaskExtension Status string `json:"-"` //don't save status to save space SrcObjPath string `json:"src_path"` DstDirPath string `json:"dst_path"` @@ -37,6 +38,9 @@ func (t *CopyTask) GetStatus() string { } func (t *CopyTask) Run() error { + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() var err error if t.srcStorage == nil { t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp) @@ -54,7 +58,7 @@ var CopyTaskManager *tache.Manager[*CopyTask] // Copy if in the same storage, call move method // if 
not, add copy task -func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskInfoWithCreator, error) { +func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskExtensionInfo, error) { srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(srcObjPath) if err != nil { return nil, errors.WithMessage(err, "failed get src storage") @@ -93,9 +97,9 @@ func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool } } // not in the same storage - taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed + taskCreator, _ := ctx.Value("user").(*model.User) t := &CopyTask{ - TaskWithCreator: task.TaskWithCreator{ + TaskExtension: task.TaskExtension{ Creator: taskCreator, }, srcStorage: srcStorage, @@ -128,8 +132,8 @@ func copyBetween2Storages(t *CopyTask, srcStorage, dstStorage driver.Driver, src srcObjPath := stdpath.Join(srcObjPath, obj.GetName()) dstObjPath := stdpath.Join(dstDirPath, srcObj.GetName()) CopyTaskManager.Add(&CopyTask{ - TaskWithCreator: task.TaskWithCreator{ - Creator: t.Creator, + TaskExtension: task.TaskExtension{ + Creator: t.GetCreator(), }, srcStorage: srcStorage, dstStorage: dstStorage, @@ -150,6 +154,7 @@ func copyFileBetween2Storages(tsk *CopyTask, srcStorage, dstStorage driver.Drive if err != nil { return errors.WithMessagef(err, "failed get src [%s] file", srcFilePath) } + tsk.SetTotalBytes(srcFile.GetSize()) link, _, err := op.Link(tsk.Ctx(), srcStorage, srcFilePath, model.LinkArgs{ Header: http.Header{}, }) diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 65e5a2c2..24f1d47f 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -69,7 +69,7 @@ func Move(ctx context.Context, srcPath, dstDirPath string, lazyCache ...bool) er return err } -func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskInfoWithCreator, error) { +func Copy(ctx context.Context, srcObjPath, dstDirPath 
string, lazyCache ...bool) (task.TaskExtensionInfo, error) { res, err := _copy(ctx, srcObjPath, dstDirPath, lazyCache...) if err != nil { log.Errorf("failed copy %s to %s: %+v", srcObjPath, dstDirPath, err) @@ -101,7 +101,7 @@ func PutDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer return err } -func PutAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskInfoWithCreator, error) { +func PutAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskExtensionInfo, error) { t, err := putAsTask(ctx, dstDirPath, file) if err != nil { log.Errorf("failed put %s: %+v", dstDirPath, err) diff --git a/internal/fs/put.go b/internal/fs/put.go index 23197f5b..bc33a3ac 100644 --- a/internal/fs/put.go +++ b/internal/fs/put.go @@ -10,10 +10,11 @@ import ( "github.com/alist-org/alist/v3/internal/task" "github.com/pkg/errors" "github.com/xhofe/tache" + "time" ) type UploadTask struct { - task.TaskWithCreator + task.TaskExtension storage driver.Driver dstDirActualPath string file model.FileStreamer @@ -28,13 +29,16 @@ func (t *UploadTask) GetStatus() string { } func (t *UploadTask) Run() error { + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() return op.Put(t.Ctx(), t.storage, t.dstDirActualPath, t.file, t.SetProgress, true) } var UploadTaskManager *tache.Manager[*UploadTask] // putAsTask add as a put task and return immediately -func putAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskInfoWithCreator, error) { +func putAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskExtensionInfo, error) { storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath) if err != nil { return nil, errors.WithMessage(err, "failed get storage") @@ -52,13 +56,14 @@ func putAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) } taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator 
is nil when convert failed t := &UploadTask{ - TaskWithCreator: task.TaskWithCreator{ + TaskExtension: task.TaskExtension{ Creator: taskCreator, }, storage: storage, dstDirActualPath: dstDirActualPath, file: file, } + t.SetTotalBytes(file.GetSize()) UploadTaskManager.Add(t) return t, nil } diff --git a/internal/offline_download/115/client.go b/internal/offline_download/115/client.go index 0ebf38ff..45f147db 100644 --- a/internal/offline_download/115/client.go +++ b/internal/offline_download/115/client.go @@ -107,6 +107,7 @@ func (p *Cloud115) Status(task *tool.DownloadTask) (*tool.Status, error) { s.Progress = t.Percent s.Status = t.GetStatus() s.Completed = t.IsDone() + s.TotalBytes = t.Size if t.IsFailed() { s.Err = fmt.Errorf(t.GetStatus()) } diff --git a/internal/offline_download/aria2/aria2.go b/internal/offline_download/aria2/aria2.go index d22b32f9..fb212b35 100644 --- a/internal/offline_download/aria2/aria2.go +++ b/internal/offline_download/aria2/aria2.go @@ -82,7 +82,7 @@ func (a *Aria2) Status(task *tool.DownloadTask) (*tool.Status, error) { if err != nil { return nil, err } - total, err := strconv.ParseUint(info.TotalLength, 10, 64) + total, err := strconv.ParseInt(info.TotalLength, 10, 64) if err != nil { total = 0 } @@ -91,8 +91,9 @@ func (a *Aria2) Status(task *tool.DownloadTask) (*tool.Status, error) { downloaded = 0 } s := &tool.Status{ - Completed: info.Status == "complete", - Err: err, + Completed: info.Status == "complete", + Err: err, + TotalBytes: total, } s.Progress = float64(downloaded) / float64(total) * 100 if len(info.FollowedBy) != 0 { diff --git a/internal/offline_download/http/client.go b/internal/offline_download/http/client.go index 6f22fcf7..9b83400e 100644 --- a/internal/offline_download/http/client.go +++ b/internal/offline_download/http/client.go @@ -83,6 +83,7 @@ func (s SimpleHttp) Run(task *tool.DownloadTask) error { } defer file.Close() fileSize := resp.ContentLength + task.SetTotalBytes(fileSize) err = 
utils.CopyWithCtx(task.Ctx(), file, resp.Body, fileSize, task.SetProgress) return err } diff --git a/internal/offline_download/pikpak/pikpak.go b/internal/offline_download/pikpak/pikpak.go index 618b1442..f07b3de8 100644 --- a/internal/offline_download/pikpak/pikpak.go +++ b/internal/offline_download/pikpak/pikpak.go @@ -3,6 +3,7 @@ package pikpak import ( "context" "fmt" + "strconv" "github.com/alist-org/alist/v3/drivers/pikpak" "github.com/alist-org/alist/v3/internal/errs" @@ -105,6 +106,10 @@ func (p *PikPak) Status(task *tool.DownloadTask) (*tool.Status, error) { s.Progress = float64(t.Progress) s.Status = t.Message s.Completed = (t.Phase == "PHASE_TYPE_COMPLETE") + s.TotalBytes, err = strconv.ParseInt(t.FileSize, 10, 64) + if err != nil { + s.TotalBytes = 0 + } if t.Phase == "PHASE_TYPE_ERROR" { s.Err = fmt.Errorf(t.Message) } diff --git a/internal/offline_download/qbit/qbit.go b/internal/offline_download/qbit/qbit.go index 807ebfef..458de03f 100644 --- a/internal/offline_download/qbit/qbit.go +++ b/internal/offline_download/qbit/qbit.go @@ -64,6 +64,7 @@ func (a *QBittorrent) Status(task *tool.DownloadTask) (*tool.Status, error) { return nil, err } s := &tool.Status{} + s.TotalBytes = info.Size s.Progress = float64(info.Completed) / float64(info.Size) * 100 switch info.State { case qbittorrent.UPLOADING, qbittorrent.PAUSEDUP, qbittorrent.QUEUEDUP, qbittorrent.STALLEDUP, qbittorrent.FORCEDUP, qbittorrent.CHECKINGUP: diff --git a/internal/offline_download/tool/add.go b/internal/offline_download/tool/add.go index 1c9da146..42349e2e 100644 --- a/internal/offline_download/tool/add.go +++ b/internal/offline_download/tool/add.go @@ -29,7 +29,7 @@ type AddURLArgs struct { DeletePolicy DeletePolicy } -func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskInfoWithCreator, error) { +func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, error) { // get tool tool, err := Tools.Get(args.Tool) if err != nil { @@ -81,7 +81,7 @@ func AddURL(ctx 
context.Context, args *AddURLArgs) (task.TaskInfoWithCreator, er taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed t := &DownloadTask{ - TaskWithCreator: task.TaskWithCreator{ + TaskExtension: task.TaskExtension{ Creator: taskCreator, }, Url: args.URL, diff --git a/internal/offline_download/tool/base.go b/internal/offline_download/tool/base.go index 3b9fb07a..ae9eac26 100644 --- a/internal/offline_download/tool/base.go +++ b/internal/offline_download/tool/base.go @@ -16,11 +16,12 @@ type AddUrlArgs struct { } type Status struct { - Progress float64 - NewGID string - Completed bool - Status string - Err error + TotalBytes int64 + Progress float64 + NewGID string + Completed bool + Status string + Err error } type Tool interface { diff --git a/internal/offline_download/tool/download.go b/internal/offline_download/tool/download.go index 038baf96..a0f1a81b 100644 --- a/internal/offline_download/tool/download.go +++ b/internal/offline_download/tool/download.go @@ -14,7 +14,7 @@ import ( ) type DownloadTask struct { - task.TaskWithCreator + task.TaskExtension Url string `json:"url"` DstDirPath string `json:"dst_dir_path"` TempDir string `json:"temp_dir"` @@ -28,6 +28,9 @@ type DownloadTask struct { } func (t *DownloadTask) Run() error { + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() if t.tool == nil { tool, err := Tools.Get(t.Toolname) if err != nil { @@ -131,6 +134,7 @@ func (t *DownloadTask) Update() (bool, error) { } t.callStatusRetried = 0 t.SetProgress(info.Progress) + t.SetTotalBytes(info.TotalBytes) t.Status = fmt.Sprintf("[%s]: %s", t.tool.Name(), info.Status) if info.NewGID != "" { log.Debugf("followen by: %+v", info.NewGID) @@ -171,16 +175,18 @@ func (t *DownloadTask) Complete() error { // upload files for i := range files { file := files[i] - TransferTaskManager.Add(&TransferTask{ - TaskWithCreator: task.TaskWithCreator{ - Creator: t.Creator, + tsk := &TransferTask{ + 
TaskExtension: task.TaskExtension{ + Creator: t.GetCreator(), }, file: file, DstDirPath: t.DstDirPath, TempDir: t.TempDir, DeletePolicy: t.DeletePolicy, FileDir: file.Path, - }) + } + tsk.SetTotalBytes(file.Size) + TransferTaskManager.Add(tsk) } return nil } diff --git a/internal/offline_download/tool/transfer.go b/internal/offline_download/tool/transfer.go index 085b4a66..a77c4822 100644 --- a/internal/offline_download/tool/transfer.go +++ b/internal/offline_download/tool/transfer.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "time" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" @@ -16,7 +17,7 @@ import ( ) type TransferTask struct { - task.TaskWithCreator + task.TaskExtension FileDir string `json:"file_dir"` DstDirPath string `json:"dst_dir_path"` TempDir string `json:"temp_dir"` @@ -25,6 +26,9 @@ type TransferTask struct { } func (t *TransferTask) Run() error { + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() // check dstDir again var err error if (t.file == File{}) { diff --git a/internal/offline_download/transmission/client.go b/internal/offline_download/transmission/client.go index a6075414..4131f3e1 100644 --- a/internal/offline_download/transmission/client.go +++ b/internal/offline_download/transmission/client.go @@ -150,6 +150,7 @@ func (t *Transmission) Status(task *tool.DownloadTask) (*tool.Status, error) { Err: err, } s.Progress = *info.PercentDone * 100 + s.TotalBytes = int64(*info.SizeWhenDone / 8) switch *info.Status { case transmissionrpc.TorrentStatusCheckWait, diff --git a/internal/task/base.go b/internal/task/base.go index a30e5987..93f413a7 100644 --- a/internal/task/base.go +++ b/internal/task/base.go @@ -3,24 +3,58 @@ package task import ( "github.com/alist-org/alist/v3/internal/model" "github.com/xhofe/tache" + "time" ) -type TaskWithCreator struct { +type TaskExtension struct { tache.Base - Creator *model.User + Creator *model.User + startTime 
*time.Time + endTime *time.Time + totalBytes int64 } -func (t *TaskWithCreator) SetCreator(creator *model.User) { +func (t *TaskExtension) SetCreator(creator *model.User) { t.Creator = creator t.Persist() } -func (t *TaskWithCreator) GetCreator() *model.User { +func (t *TaskExtension) GetCreator() *model.User { return t.Creator } -type TaskInfoWithCreator interface { - tache.TaskWithInfo - SetCreator(creator *model.User) - GetCreator() *model.User +func (t *TaskExtension) SetStartTime(startTime time.Time) { + t.startTime = &startTime +} + +func (t *TaskExtension) GetStartTime() *time.Time { + return t.startTime +} + +func (t *TaskExtension) SetEndTime(endTime time.Time) { + t.endTime = &endTime +} + +func (t *TaskExtension) GetEndTime() *time.Time { + return t.endTime +} + +func (t *TaskExtension) ClearEndTime() { + t.endTime = nil +} + +func (t *TaskExtension) SetTotalBytes(totalBytes int64) { + t.totalBytes = totalBytes +} + +func (t *TaskExtension) GetTotalBytes() int64 { + return t.totalBytes +} + +type TaskExtensionInfo interface { + tache.TaskWithInfo + GetCreator() *model.User + GetStartTime() *time.Time + GetEndTime() *time.Time + GetTotalBytes() int64 } diff --git a/server/handles/fsmanage.go b/server/handles/fsmanage.go index 42d53d7e..9877b127 100644 --- a/server/handles/fsmanage.go +++ b/server/handles/fsmanage.go @@ -121,7 +121,7 @@ func FsCopy(c *gin.Context) { common.ErrorResp(c, err, 403) return } - var addedTasks []task.TaskInfoWithCreator + var addedTasks []task.TaskExtensionInfo for i, name := range req.Names { t, err := fs.Copy(c, stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1) if t != nil { diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 3a366d49..a17c50f0 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -57,7 +57,7 @@ func FsStream(c *gin.Context) { Mimetype: c.GetHeader("Content-Type"), WebPutAsTask: asTask, } - var t task.TaskInfoWithCreator + var t task.TaskExtensionInfo if asTask { t, err = 
fs.PutAsTask(c, dir, s) } else { @@ -122,7 +122,7 @@ func FsForm(c *gin.Context) { Mimetype: file.Header.Get("Content-Type"), WebPutAsTask: asTask, } - var t task.TaskInfoWithCreator + var t task.TaskExtensionInfo if asTask { s.Reader = struct { io.Reader diff --git a/server/handles/offline_download.go b/server/handles/offline_download.go index ff1fcfa0..9e26030a 100644 --- a/server/handles/offline_download.go +++ b/server/handles/offline_download.go @@ -133,7 +133,7 @@ func AddOfflineDownload(c *gin.Context) { common.ErrorResp(c, err, 403) return } - var tasks []task.TaskInfoWithCreator + var tasks []task.TaskExtensionInfo for _, url := range req.Urls { t, err := tool.AddURL(c, &tool.AddURLArgs{ URL: url, diff --git a/server/handles/task.go b/server/handles/task.go index 5f996505..c7d9ef48 100644 --- a/server/handles/task.go +++ b/server/handles/task.go @@ -4,6 +4,7 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/task" "math" + "time" "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/offline_download/tool" @@ -21,10 +22,13 @@ type TaskInfo struct { State tache.State `json:"state"` Status string `json:"status"` Progress float64 `json:"progress"` + StartTime *time.Time `json:"start_time"` + EndTime *time.Time `json:"end_time"` + TotalBytes int64 `json:"total_bytes"` Error string `json:"error"` } -func getTaskInfo[T task.TaskInfoWithCreator](task T) TaskInfo { +func getTaskInfo[T task.TaskExtensionInfo](task T) TaskInfo { errMsg := "" if task.GetErr() != nil { errMsg = task.GetErr().Error() @@ -48,11 +52,14 @@ func getTaskInfo[T task.TaskInfoWithCreator](task T) TaskInfo { State: task.GetState(), Status: task.GetStatus(), Progress: progress, + StartTime: task.GetStartTime(), + EndTime: task.GetEndTime(), + TotalBytes: task.GetTotalBytes(), Error: errMsg, } } -func getTaskInfos[T task.TaskInfoWithCreator](tasks []T) []TaskInfo { +func getTaskInfos[T task.TaskExtensionInfo](tasks []T) 
[]TaskInfo { return utils.MustSliceConvert(tasks, getTaskInfo[T]) } @@ -68,7 +75,7 @@ func getUserInfo(c *gin.Context) (bool, uint, bool) { } } -func getTargetedHandler[T task.TaskInfoWithCreator](manager *tache.Manager[T], callback func(c *gin.Context, task T)) gin.HandlerFunc { +func getTargetedHandler[T task.TaskExtensionInfo](manager *tache.Manager[T], callback func(c *gin.Context, task T)) gin.HandlerFunc { return func(c *gin.Context) { isAdmin, uid, ok := getUserInfo(c) if !ok { @@ -90,7 +97,7 @@ func getTargetedHandler[T task.TaskInfoWithCreator](manager *tache.Manager[T], c } } -func getBatchHandler[T task.TaskInfoWithCreator](manager *tache.Manager[T], callback func(task T)) gin.HandlerFunc { +func getBatchHandler[T task.TaskExtensionInfo](manager *tache.Manager[T], callback func(task T)) gin.HandlerFunc { return func(c *gin.Context) { isAdmin, uid, ok := getUserInfo(c) if !ok { @@ -115,7 +122,7 @@ func getBatchHandler[T task.TaskInfoWithCreator](manager *tache.Manager[T], call } } -func taskRoute[T task.TaskInfoWithCreator](g *gin.RouterGroup, manager *tache.Manager[T]) { +func taskRoute[T task.TaskExtensionInfo](g *gin.RouterGroup, manager *tache.Manager[T]) { g.GET("/undone", func(c *gin.Context) { isAdmin, uid, ok := getUserInfo(c) if !ok { From bb2aec20e4b611ab2242c6b1dc793c337cf47d95 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Wed, 25 Dec 2024 21:11:05 +0800 Subject: [PATCH 055/187] fix(139): handle upload file conflicts (#7692) --- drivers/139/driver.go | 193 ++++++++++++++++++++++++++++-------------- drivers/139/types.go | 1 + 2 files changed, 132 insertions(+), 62 deletions(-) diff --git a/drivers/139/driver.go b/drivers/139/driver.go index 8862983c..dd154efe 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -552,7 +552,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr firstPartInfos = firstPartInfos[:100] } - // 获取上传信息和前100个分片的上传地址 + // 创建任务,获取上传信息和前100个分片的上传地址 data := base.Json{ 
"contentHash": fullHash, "contentHashAlgorithm": "SHA256", @@ -572,87 +572,156 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr return err } - if resp.Data.Exist || resp.Data.RapidUpload { + // 判断文件是否已存在 + // resp.Data.Exist: true 已存在同名文件且校验相同,云端不会重复增加文件,无需手动处理冲突 + if resp.Data.Exist { return nil } - uploadPartInfos := resp.Data.PartInfos + // 判断文件是否支持快传 + // resp.Data.RapidUpload: true 支持快传,但此处直接检测是否返回分片的上传地址 + // 快传的情况下同样需要手动处理冲突 + if resp.Data.PartInfos != nil { + // 读取前100个分片的上传地址 + uploadPartInfos := resp.Data.PartInfos - // 获取后续分片的上传地址 - for i := 101; i < len(partInfos); i += 100 { - end := i + 100 - if end > len(partInfos) { - end = len(partInfos) - } - batchPartInfos := partInfos[i:end] + // 获取后续分片的上传地址 + for i := 101; i < len(partInfos); i += 100 { + end := i + 100 + if end > len(partInfos) { + end = len(partInfos) + } + batchPartInfos := partInfos[i:end] - moredata := base.Json{ - "fileId": resp.Data.FileId, - "uploadId": resp.Data.UploadId, - "partInfos": batchPartInfos, - "commonAccountInfo": base.Json{ - "account": d.Account, - "accountType": 1, - }, + moredata := base.Json{ + "fileId": resp.Data.FileId, + "uploadId": resp.Data.UploadId, + "partInfos": batchPartInfos, + "commonAccountInfo": base.Json{ + "account": d.Account, + "accountType": 1, + }, + } + pathname := "/hcy/file/getUploadUrl" + var moreresp PersonalUploadUrlResp + _, err = d.personalPost(pathname, moredata, &moreresp) + if err != nil { + return err + } + uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...) 
} - pathname := "/hcy/file/getUploadUrl" - var moreresp PersonalUploadUrlResp - _, err = d.personalPost(pathname, moredata, &moreresp) + + // Progress + p := driver.NewProgress(stream.GetSize(), up) + + // 上传所有分片 + for _, uploadPartInfo := range uploadPartInfos { + index := uploadPartInfo.PartNumber - 1 + partSize := partInfos[index].PartSize + log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos)) + limitReader := io.LimitReader(stream, partSize) + + // Update Progress + r := io.TeeReader(limitReader, p) + + req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r) + if err != nil { + return err + } + req = req.WithContext(ctx) + req.Header.Set("Content-Type", "application/octet-stream") + req.Header.Set("Content-Length", fmt.Sprint(partSize)) + req.Header.Set("Origin", "https://yun.139.com") + req.Header.Set("Referer", "https://yun.139.com/") + req.ContentLength = partSize + + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + _ = res.Body.Close() + log.Debugf("[139] uploaded: %+v", res) + if res.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected status code: %d", res.StatusCode) + } + } + + data = base.Json{ + "contentHash": fullHash, + "contentHashAlgorithm": "SHA256", + "fileId": resp.Data.FileId, + "uploadId": resp.Data.UploadId, + } + _, err = d.personalPost("/hcy/file/complete", data, nil) if err != nil { return err } - uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...) 
} - // Progress - p := driver.NewProgress(stream.GetSize(), up) - - // 上传所有分片 - for _, uploadPartInfo := range uploadPartInfos { - index := uploadPartInfo.PartNumber - 1 - partSize := partInfos[index].PartSize - log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos)) - limitReader := io.LimitReader(stream, partSize) - - // Update Progress - r := io.TeeReader(limitReader, p) - - req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r) + // 处理冲突 + if resp.Data.FileName != stream.GetName() { + log.Debugf("[139] conflict detected: %s != %s", resp.Data.FileName, stream.GetName()) + // 给服务器一定时间处理数据,避免无法刷新文件列表 + time.Sleep(time.Millisecond * 500) + // 刷新并获取文件列表 + files, err := d.List(ctx, dstDir, model.ListArgs{Refresh: true}) if err != nil { return err } - req = req.WithContext(ctx) - req.Header.Set("Content-Type", "application/octet-stream") - req.Header.Set("Content-Length", fmt.Sprint(partSize)) - req.Header.Set("Origin", "https://yun.139.com") - req.Header.Set("Referer", "https://yun.139.com/") - req.ContentLength = partSize - - res, err := base.HttpClient.Do(req) - if err != nil { - return err + // 删除旧文件 + for _, file := range files { + if file.GetName() == stream.GetName() { + log.Debugf("[139] conflict: removing old: %s", file.GetName()) + // 删除前重命名旧文件,避免仍旧冲突 + err = d.Rename(ctx, file, stream.GetName()+random.String(4)) + if err != nil { + return err + } + err = d.Remove(ctx, file) + if err != nil { + return err + } + break + } } - _ = res.Body.Close() - log.Debugf("[139] uploaded: %+v", res) - if res.StatusCode != http.StatusOK { - return fmt.Errorf("unexpected status code: %d", res.StatusCode) + // 重命名新文件 + for _, file := range files { + if file.GetName() == resp.Data.FileName { + log.Debugf("[139] conflict: renaming new: %s => %s", file.GetName(), stream.GetName()) + err = d.Rename(ctx, file, stream.GetName()) + if err != nil { + return err + } + break + } } } - - data = base.Json{ - "contentHash": fullHash, - "contentHashAlgorithm": 
"SHA256", - "fileId": resp.Data.FileId, - "uploadId": resp.Data.UploadId, - } - _, err = d.personalPost("/hcy/file/complete", data, nil) - if err != nil { - return err - } return nil case MetaPersonal: fallthrough case MetaFamily: + // 处理冲突 + // 获取文件列表 + files, err := d.List(ctx, dstDir, model.ListArgs{}) + if err != nil { + return err + } + // 删除旧文件 + for _, file := range files { + if file.GetName() == stream.GetName() { + log.Debugf("[139] conflict: removing old: %s", file.GetName()) + // 删除前重命名旧文件,避免仍旧冲突 + err = d.Rename(ctx, file, stream.GetName()+random.String(4)) + if err != nil { + return err + } + err = d.Remove(ctx, file) + if err != nil { + return err + } + break + } + } data := base.Json{ "manualRename": 2, "operation": 0, @@ -688,7 +757,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr pathname = "/orchestration/familyCloud-rebuild/content/v1.0/getFileUploadURL" } var resp UploadResp - _, err := d.post(pathname, data, &resp) + _, err = d.post(pathname, data, &resp) if err != nil { return err } diff --git a/drivers/139/types.go b/drivers/139/types.go index c34cba03..ac7079d8 100644 --- a/drivers/139/types.go +++ b/drivers/139/types.go @@ -261,6 +261,7 @@ type PersonalUploadResp struct { BaseResp Data struct { FileId string `json:"fileId"` + FileName string `json:"fileName"` PartInfos []PersonalPartInfo `json:"partInfos"` Exist bool `json:"exist"` RapidUpload bool `json:"rapidUpload"` From 6aaf5975c6651df36329860ab5be5a2a058396d7 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Wed, 25 Dec 2024 21:11:36 +0800 Subject: [PATCH 056/187] fix(ftp-server): work unproperly when base url is not root (#7693) * fix(ftp-server): work unproperly when base url is not root * fix: avoid merge conflict --- server/ftp/afero.go | 14 ++++++++++---- server/ftp/fsread.go | 6 +----- server/ftp/fsup.go | 4 ---- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/server/ftp/afero.go b/server/ftp/afero.go index 
866ad8c0..448744b1 100644 --- a/server/ftp/afero.go +++ b/server/ftp/afero.go @@ -6,6 +6,7 @@ import ( ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" "github.com/spf13/afero" "os" "time" @@ -91,7 +92,12 @@ func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserve if (flags & os.O_APPEND) != 0 { return nil, errs.NotSupport } - _, err := fs.Get(a.ctx, name, &fs.GetArgs{}) + user := a.ctx.Value("user").(*model.User) + path, err := user.JoinPath(name) + if err != nil { + return nil, err + } + _, err = fs.Get(a.ctx, path, &fs.GetArgs{}) exists := err == nil if (flags&os.O_CREATE) == 0 && !exists { return nil, errs.ObjectNotFound @@ -102,12 +108,12 @@ func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserve if (flags & os.O_WRONLY) != 0 { trunc := (flags & os.O_TRUNC) != 0 if fileSize > 0 { - return OpenUploadWithLength(a.ctx, name, trunc, fileSize) + return OpenUploadWithLength(a.ctx, path, trunc, fileSize) } else { - return OpenUpload(a.ctx, name, trunc) + return OpenUpload(a.ctx, path, trunc) } } - return OpenDownload(a.ctx, name) + return OpenDownload(a.ctx, path) } func (a *AferoAdapter) SetNextFileSize(size int64) { diff --git a/server/ftp/fsread.go b/server/ftp/fsread.go index 6a9ba2eb..91f87bf4 100644 --- a/server/ftp/fsread.go +++ b/server/ftp/fsread.go @@ -25,12 +25,8 @@ type FileDownloadProxy struct { closers *utils.Closers } -func OpenDownload(ctx context.Context, path string) (*FileDownloadProxy, error) { +func OpenDownload(ctx context.Context, reqPath string) (*FileDownloadProxy, error) { user := ctx.Value("user").(*model.User) - reqPath, err := user.JoinPath(path) - if err != nil { - return nil, err - } meta, err := op.GetNearestMeta(reqPath) if err != nil { if !errors.Is(errors.Cause(err), errs.MetaNotFound) { diff --git a/server/ftp/fsup.go b/server/ftp/fsup.go 
index f18c13c2..96c84681 100644 --- a/server/ftp/fsup.go +++ b/server/ftp/fsup.go @@ -29,10 +29,6 @@ type FileUploadProxy struct { func uploadAuth(ctx context.Context, path string) error { user := ctx.Value("user").(*model.User) - path, err := user.JoinPath(path) - if err != nil { - return err - } meta, err := op.GetNearestMeta(stdpath.Dir(path)) if err != nil { if !errors.Is(errors.Cause(err), errs.MetaNotFound) { From b72e85a73a44537678d59e34820a0ec694a52eec Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Wed, 25 Dec 2024 21:11:45 +0800 Subject: [PATCH 057/187] fix(ftp-server): rewrite download in a more appropriate method (#7656) --- server/ftp/fsread.go | 46 ++++++++------------------------------------ 1 file changed, 8 insertions(+), 38 deletions(-) diff --git a/server/ftp/fsread.go b/server/ftp/fsread.go index 91f87bf4..74d184b6 100644 --- a/server/ftp/fsread.go +++ b/server/ftp/fsread.go @@ -6,10 +6,8 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/net" "github.com/alist-org/alist/v3/internal/op" - "github.com/alist-org/alist/v3/pkg/http_range" - "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/server/common" "github.com/pkg/errors" "io" @@ -21,8 +19,7 @@ import ( type FileDownloadProxy struct { ftpserver.FileTransfer - reader io.ReadCloser - closers *utils.Closers + reader io.ReadCloser } func OpenDownload(ctx context.Context, reqPath string) (*FileDownloadProxy, error) { @@ -47,37 +44,15 @@ func OpenDownload(ctx context.Context, reqPath string) (*FileDownloadProxy, erro if err != nil { return nil, err } - storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{}) + fileStream := stream.FileStream{ + Obj: obj, + Ctx: ctx, + } + ss, err := stream.NewSeekableStream(fileStream, link) if err != nil { return nil, err } - if 
storage.GetStorage().ProxyRange { - common.ProxyRange(link, obj.GetSize()) - } - reader, closers, err := proxy(link) - if err != nil { - return nil, err - } - return &FileDownloadProxy{reader: reader, closers: closers}, nil -} - -func proxy(link *model.Link) (io.ReadCloser, *utils.Closers, error) { - if link.MFile != nil { - return link.MFile, nil, nil - } else if link.RangeReadCloser != nil { - rc, err := link.RangeReadCloser.RangeRead(context.Background(), http_range.Range{Length: -1}) - if err != nil { - return nil, nil, err - } - closers := link.RangeReadCloser.GetClosers() - return rc, &closers, nil - } else { - res, err := net.RequestHttp(context.Background(), http.MethodGet, link.Header, link.URL) - if err != nil { - return nil, nil, err - } - return res.Body, nil, nil - } + return &FileDownloadProxy{reader: ss}, nil } func (f *FileDownloadProxy) Read(p []byte) (n int, err error) { @@ -93,11 +68,6 @@ func (f *FileDownloadProxy) Seek(offset int64, whence int) (int64, error) { } func (f *FileDownloadProxy) Close() error { - defer func() { - if f.closers != nil { - _ = f.closers.Close() - } - }() return f.reader.Close() } From 40b0e66efec91b08b3ea09fdaff8943ae0bdbb5f Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Wed, 25 Dec 2024 21:12:30 +0800 Subject: [PATCH 058/187] feat(ftp-server): treat moving across file systems as copying (#7704 close #7701) * feat(ftp-server): treat moving across file systems as copying * fix: ensure compatibility across different fs on the same driver --- server/ftp/fsmanage.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/server/ftp/fsmanage.go b/server/ftp/fsmanage.go index 5199a473..fb03c1b9 100644 --- a/server/ftp/fsmanage.go +++ b/server/ftp/fsmanage.go @@ -2,6 +2,7 @@ package ftp import ( "context" + "fmt" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/model" @@ -64,8 +65,14 @@ func Rename(ctx 
context.Context, oldPath, newPath string) error { if !user.CanFTPManage() || !user.CanMove() || (srcBase != dstBase && !user.CanRename()) { return errs.PermissionDenied } - if err := fs.Move(ctx, srcPath, dstDir); err != nil { - return err + if err = fs.Move(ctx, srcPath, dstDir); err != nil { + if srcBase != dstBase { + return err + } + if _, err1 := fs.Copy(ctx, srcPath, dstDir); err1 != nil { + return fmt.Errorf("failed move for %+v, and failed try copying for %+v", err, err1) + } + return nil } if srcBase != dstBase { return fs.Rename(ctx, stdpath.Join(dstDir, srcBase), dstBase) From 221cdf3611ae317f9c5a7071a8bee28fac0b176e Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Wed, 25 Dec 2024 21:13:23 +0800 Subject: [PATCH 059/187] feat(s3): support custom host presign (#7699 close #7696) --- drivers/s3/driver.go | 8 ++++++-- drivers/s3/meta.go | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/s3/driver.go b/drivers/s3/driver.go index 2b72d789..82c050a1 100644 --- a/drivers/s3/driver.go +++ b/drivers/s3/driver.go @@ -99,8 +99,12 @@ func (d *S3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*mo var link model.Link var err error if d.CustomHost != "" { - err = req.Build() - link.URL = req.HTTPRequest.URL.String() + if d.EnableCustomHostPresign { + link.URL, err = req.Presign(time.Hour * time.Duration(d.SignURLExpire)) + } else { + err = req.Build() + link.URL = req.HTTPRequest.URL.String() + } if d.RemoveBucket { link.URL = strings.Replace(link.URL, "/"+d.Bucket, "", 1) } diff --git a/drivers/s3/meta.go b/drivers/s3/meta.go index 4436c615..4de4b60a 100644 --- a/drivers/s3/meta.go +++ b/drivers/s3/meta.go @@ -14,6 +14,7 @@ type Addition struct { SecretAccessKey string `json:"secret_access_key" required:"true"` SessionToken string `json:"session_token"` CustomHost string `json:"custom_host"` + EnableCustomHostPresign bool `json:"enable_custom_host_presign"` SignURLExpire int `json:"sign_url_expire" 
type:"number" default:"4"` Placeholder string `json:"placeholder"` ForcePathStyle bool `json:"force_path_style"` From db5c601cfe3c816735bcbc034b66757772a6b9e0 Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Wed, 25 Dec 2024 21:13:54 +0800 Subject: [PATCH 060/187] fix(crypt): add sign to thumbnail (#6611) --- drivers/crypt/driver.go | 7 ++++++- drivers/local/driver.go | 8 ++++---- server/common/common.go | 9 +++++++++ 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/drivers/crypt/driver.go b/drivers/crypt/driver.go index b0325db4..b6115896 100644 --- a/drivers/crypt/driver.go +++ b/drivers/crypt/driver.go @@ -13,6 +13,7 @@ import ( "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" @@ -160,7 +161,11 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([ // discarding hash as it's encrypted } if d.Thumbnail && thumb == "" { - thumb = utils.EncodePath(common.GetApiUrl(nil)+stdpath.Join("/d", args.ReqPath, ".thumbnails", name+".webp"), true) + thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp") + thumb = fmt.Sprintf("%s/d%s?sign=%s", + common.GetApiUrl(common.GetHttpReq(ctx)), + utils.EncodePath(thumbPath, true), + sign.Sign(thumbPath)) } if !ok && !d.Thumbnail { result = append(result, &objRes) diff --git a/drivers/local/driver.go b/drivers/local/driver.go index 229c8692..2519232e 100644 --- a/drivers/local/driver.go +++ b/drivers/local/driver.go @@ -101,17 +101,17 @@ func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([ if !d.ShowHidden && strings.HasPrefix(f.Name(), ".") { continue } - file := d.FileInfoToObj(f, args.ReqPath, fullPath) + file := d.FileInfoToObj(ctx, f, 
args.ReqPath, fullPath) files = append(files, file) } return files, nil } -func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) model.Obj { +func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string, fullPath string) model.Obj { thumb := "" if d.Thumbnail { typeName := utils.GetFileType(f.Name()) if typeName == conf.IMAGE || typeName == conf.VIDEO { - thumb = common.GetApiUrl(nil) + stdpath.Join("/d", reqPath, f.Name()) + thumb = common.GetApiUrl(common.GetHttpReq(ctx)) + stdpath.Join("/d", reqPath, f.Name()) thumb = utils.EncodePath(thumb, true) thumb += "?type=thumb&sign=" + sign.Sign(stdpath.Join(reqPath, f.Name())) } @@ -149,7 +149,7 @@ func (d *Local) GetMeta(ctx context.Context, path string) (model.Obj, error) { if err != nil { return nil, err } - file := d.FileInfoToObj(f, path, path) + file := d.FileInfoToObj(ctx, f, path, path) //h := "123123" //if s, ok := f.(model.SetHash); ok && file.GetHash() == ("","") { // s.SetHash(h,"SHA1") diff --git a/server/common/common.go b/server/common/common.go index 28d2da44..e231ffe6 100644 --- a/server/common/common.go +++ b/server/common/common.go @@ -1,6 +1,8 @@ package common import ( + "context" + "net/http" "strings" "github.com/alist-org/alist/v3/cmd/flags" @@ -80,3 +82,10 @@ func SuccessResp(c *gin.Context, data ...interface{}) { Data: data[0], }) } + +func GetHttpReq(ctx context.Context) *http.Request { + if c, ok := ctx.(*gin.Context); ok { + return c.Request + } + return nil +} From 77d0c78bfd0e7040db459940312372e9a4813b05 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Wed, 25 Dec 2024 21:15:06 +0800 Subject: [PATCH 061/187] feat(sftp-server): public key login (#7668) --- internal/db/db.go | 2 +- internal/db/sshkey.go | 57 ++++++++++++++++++ internal/model/sshkey.go | 28 +++++++++ internal/op/sshkey.go | 48 +++++++++++++++ server/handles/sshkey.go | 124 +++++++++++++++++++++++++++++++++++++++ server/router.go | 5 ++ server/sftp.go | 27 
++++++++- 7 files changed, 289 insertions(+), 2 deletions(-) create mode 100644 internal/db/sshkey.go create mode 100644 internal/model/sshkey.go create mode 100644 internal/op/sshkey.go create mode 100644 server/handles/sshkey.go diff --git a/internal/db/db.go b/internal/db/db.go index 2df58d37..2cd18050 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -12,7 +12,7 @@ var db *gorm.DB func Init(d *gorm.DB) { db = d - err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem)) + err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey)) if err != nil { log.Fatalf("failed migrate database: %s", err.Error()) } diff --git a/internal/db/sshkey.go b/internal/db/sshkey.go new file mode 100644 index 00000000..f51dbfdc --- /dev/null +++ b/internal/db/sshkey.go @@ -0,0 +1,57 @@ +package db + +import ( + "github.com/alist-org/alist/v3/internal/model" + "github.com/pkg/errors" +) + +func GetSSHPublicKeyByUserId(userId uint, pageIndex, pageSize int) (keys []model.SSHPublicKey, count int64, err error) { + keyDB := db.Model(&model.SSHPublicKey{}) + query := model.SSHPublicKey{UserId: userId} + if err := keyDB.Where(query).Count(&count).Error; err != nil { + return nil, 0, errors.Wrapf(err, "failed get user's keys count") + } + if err := keyDB.Where(query).Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&keys).Error; err != nil { + return nil, 0, errors.Wrapf(err, "failed get find user's keys") + } + return keys, count, nil +} + +func GetSSHPublicKeyById(id uint) (*model.SSHPublicKey, error) { + var k model.SSHPublicKey + if err := db.First(&k, id).Error; err != nil { + return nil, errors.Wrapf(err, "failed get old key") + } + return &k, nil +} + +func GetSSHPublicKeyByUserTitle(userId uint, title string) (*model.SSHPublicKey, error) { + key := 
model.SSHPublicKey{UserId: userId, Title: title} + if err := db.Where(key).First(&key).Error; err != nil { + return nil, errors.Wrapf(err, "failed find key with title of user") + } + return &key, nil +} + +func CreateSSHPublicKey(k *model.SSHPublicKey) error { + return errors.WithStack(db.Create(k).Error) +} + +func UpdateSSHPublicKey(k *model.SSHPublicKey) error { + return errors.WithStack(db.Save(k).Error) +} + +func GetSSHPublicKeys(pageIndex, pageSize int) (keys []model.SSHPublicKey, count int64, err error) { + keyDB := db.Model(&model.SSHPublicKey{}) + if err := keyDB.Count(&count).Error; err != nil { + return nil, 0, errors.Wrapf(err, "failed get keys count") + } + if err := keyDB.Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&keys).Error; err != nil { + return nil, 0, errors.Wrapf(err, "failed get find keys") + } + return keys, count, nil +} + +func DeleteSSHPublicKeyById(id uint) error { + return errors.WithStack(db.Delete(&model.SSHPublicKey{}, id).Error) +} diff --git a/internal/model/sshkey.go b/internal/model/sshkey.go new file mode 100644 index 00000000..6e97c103 --- /dev/null +++ b/internal/model/sshkey.go @@ -0,0 +1,28 @@ +package model + +import ( + "golang.org/x/crypto/ssh" + "time" +) + +type SSHPublicKey struct { + ID uint `json:"id" gorm:"primaryKey"` + UserId uint `json:"-"` + Title string `json:"title"` + Fingerprint string `json:"fingerprint"` + KeyStr string `gorm:"type:text" json:"-"` + AddedTime time.Time `json:"added_time"` + LastUsedTime time.Time `json:"last_used_time"` +} + +func (k *SSHPublicKey) GetKey() (ssh.PublicKey, error) { + pubKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k.KeyStr)) + if err != nil { + return nil, err + } + return pubKey, nil +} + +func (k *SSHPublicKey) UpdateLastUsedTime() { + k.LastUsedTime = time.Now() +} diff --git a/internal/op/sshkey.go b/internal/op/sshkey.go new file mode 100644 index 00000000..6ed55658 --- /dev/null +++ b/internal/op/sshkey.go @@ -0,0 +1,48 @@ 
+package op + +import ( + "github.com/alist-org/alist/v3/internal/db" + "github.com/alist-org/alist/v3/internal/model" + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" + "time" +) + +func CreateSSHPublicKey(k *model.SSHPublicKey) (error, bool) { + _, err := db.GetSSHPublicKeyByUserTitle(k.UserId, k.Title) + if err == nil { + return errors.New("key with the same title already exists"), true + } + pubKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k.KeyStr)) + if err != nil { + return err, false + } + k.KeyStr = string(pubKey.Marshal()) + k.Fingerprint = ssh.FingerprintSHA256(pubKey) + k.AddedTime = time.Now() + k.LastUsedTime = k.AddedTime + return db.CreateSSHPublicKey(k), true +} + +func GetSSHPublicKeyByUserId(userId uint, pageIndex, pageSize int) (keys []model.SSHPublicKey, count int64, err error) { + return db.GetSSHPublicKeyByUserId(userId, pageIndex, pageSize) +} + +func GetSSHPublicKeyByIdAndUserId(id uint, userId uint) (*model.SSHPublicKey, error) { + key, err := db.GetSSHPublicKeyById(id) + if err != nil { + return nil, err + } + if key.UserId != userId { + return nil, errors.Wrapf(err, "failed get old key") + } + return key, nil +} + +func UpdateSSHPublicKey(k *model.SSHPublicKey) error { + return db.UpdateSSHPublicKey(k) +} + +func DeleteSSHPublicKeyById(keyId uint) error { + return db.DeleteSSHPublicKeyById(keyId) +} diff --git a/server/handles/sshkey.go b/server/handles/sshkey.go new file mode 100644 index 00000000..c53b46f2 --- /dev/null +++ b/server/handles/sshkey.go @@ -0,0 +1,124 @@ +package handles + +import ( + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/server/common" + "github.com/gin-gonic/gin" + "strconv" +) + +type SSHKeyAddReq struct { + Title string `json:"title" binding:"required"` + Key string `json:"key" binding:"required"` +} + +func AddMyPublicKey(c *gin.Context) { + userObj, ok := c.Value("user").(*model.User) + if !ok || userObj.IsGuest() { + 
common.ErrorStrResp(c, "user invalid", 401) + return + } + var req SSHKeyAddReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorStrResp(c, "request invalid", 400) + return + } + if req.Title == "" { + common.ErrorStrResp(c, "request invalid", 400) + return + } + key := &model.SSHPublicKey{ + Title: req.Title, + KeyStr: req.Key, + UserId: userObj.ID, + } + err, parsed := op.CreateSSHPublicKey(key) + if !parsed { + common.ErrorStrResp(c, "provided key invalid", 400) + return + } else if err != nil { + common.ErrorResp(c, err, 500, true) + return + } + common.SuccessResp(c) +} + +func ListMyPublicKey(c *gin.Context) { + userObj, ok := c.Value("user").(*model.User) + if !ok || userObj.IsGuest() { + common.ErrorStrResp(c, "user invalid", 401) + return + } + list(c, userObj) +} + +func DeleteMyPublicKey(c *gin.Context) { + userObj, ok := c.Value("user").(*model.User) + if !ok || userObj.IsGuest() { + common.ErrorStrResp(c, "user invalid", 401) + return + } + keyId, err := strconv.Atoi(c.Query("id")) + if err != nil { + common.ErrorStrResp(c, "id format invalid", 400) + return + } + key, err := op.GetSSHPublicKeyByIdAndUserId(uint(keyId), userObj.ID) + if err != nil { + common.ErrorStrResp(c, "failed to get public key", 404) + return + } + err = op.DeleteSSHPublicKeyById(key.ID) + if err != nil { + common.ErrorResp(c, err, 500, true) + return + } + common.SuccessResp(c) +} + +func ListPublicKeys(c *gin.Context) { + userId, err := strconv.Atoi(c.Query("uid")) + if err != nil { + common.ErrorStrResp(c, "user id format invalid", 400) + return + } + userObj, err := op.GetUserById(uint(userId)) + if err != nil { + common.ErrorStrResp(c, "user invalid", 404) + return + } + list(c, userObj) +} + +func DeletePublicKey(c *gin.Context) { + keyId, err := strconv.Atoi(c.Query("id")) + if err != nil { + common.ErrorStrResp(c, "id format invalid", 400) + return + } + err = op.DeleteSSHPublicKeyById(uint(keyId)) + if err != nil { + common.ErrorResp(c, err, 500, true) + return 
+ } + common.SuccessResp(c) +} + +func list(c *gin.Context, userObj *model.User) { + var req model.PageReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + req.Validate() + keys, total, err := op.GetSSHPublicKeyByUserId(userObj.ID, req.Page, req.PerPage) + if err != nil { + common.ErrorResp(c, err, 500, true) + return + } + common.SuccessResp(c, common.PageResp{ + Content: keys, + Total: total, + }) +} diff --git a/server/router.go b/server/router.go index fffa840e..9ff50365 100644 --- a/server/router.go +++ b/server/router.go @@ -52,6 +52,9 @@ func Init(e *gin.Engine) { api.POST("/auth/login/ldap", handles.LoginLdap) auth.GET("/me", handles.CurrentUser) auth.POST("/me/update", handles.UpdateCurrent) + auth.GET("/me/sshkey/list", handles.ListMyPublicKey) + auth.POST("/me/sshkey/add", handles.AddMyPublicKey) + auth.POST("/me/sshkey/delete", handles.DeleteMyPublicKey) auth.POST("/auth/2fa/generate", handles.Generate2FA) auth.POST("/auth/2fa/verify", handles.Verify2FA) auth.GET("/auth/logout", handles.LogOut) @@ -102,6 +105,8 @@ func admin(g *gin.RouterGroup) { user.POST("/cancel_2fa", handles.Cancel2FAById) user.POST("/delete", handles.DeleteUser) user.POST("/del_cache", handles.DelUserCache) + user.GET("/sshkey/list", handles.ListPublicKeys) + user.POST("/sshkey/delete", handles.DeletePublicKey) storage := g.Group("/storage") storage.GET("/list", handles.ListStorages) diff --git a/server/sftp.go b/server/sftp.go index 3b07d472..d44046a4 100644 --- a/server/sftp.go +++ b/server/sftp.go @@ -12,6 +12,7 @@ import ( "github.com/pkg/errors" "golang.org/x/crypto/ssh" "net/http" + "time" ) type SftpDriver struct { @@ -35,6 +36,7 @@ func (d *SftpDriver) GetConfig() *sftpd.Config { NoClientAuth: true, NoClientAuthCallback: d.NoClientAuth, PasswordCallback: d.PasswordAuth, + PublicKeyCallback: d.PublicKeyAuth, AuthLogCallback: d.AuthLogCallback, BannerCallback: d.GetBanner, } @@ -85,14 +87,37 @@ func (d *SftpDriver) PasswordAuth(conn 
ssh.ConnMetadata, password []byte) (*ssh. if err != nil { return nil, err } + if userObj.Disabled || !userObj.CanFTPAccess() { + return nil, errors.New("user is not allowed to access via SFTP") + } passHash := model.StaticHash(string(password)) if err = userObj.ValidatePwdStaticHash(passHash); err != nil { return nil, err } + return nil, nil +} + +func (d *SftpDriver) PublicKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) { + userObj, err := op.GetUserByName(conn.User()) + if err != nil { + return nil, err + } if userObj.Disabled || !userObj.CanFTPAccess() { return nil, errors.New("user is not allowed to access via SFTP") } - return nil, nil + keys, _, err := op.GetSSHPublicKeyByUserId(userObj.ID, 1, -1) + if err != nil { + return nil, err + } + marshal := string(key.Marshal()) + for _, sk := range keys { + if marshal == sk.KeyStr { + sk.LastUsedTime = time.Now() + _ = op.UpdateSSHPublicKey(&sk) + return nil, nil + } + } + return nil, errors.New("public key refused") } func (d *SftpDriver) AuthLogCallback(conn ssh.ConnMetadata, method string, err error) { From c218b5701e72a941958aa2b681a5f2e7e1d10519 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Wed, 25 Dec 2024 21:16:03 +0800 Subject: [PATCH 062/187] fix(115): support float QPS (#7677) --- drivers/115/meta.go | 2 +- drivers/115_share/meta.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/115/meta.go b/drivers/115/meta.go index 3b192291..bcea1749 100644 --- a/drivers/115/meta.go +++ b/drivers/115/meta.go @@ -10,7 +10,7 @@ type Addition struct { QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"` QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"` PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"` - LimitRate 
float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate ([limit]r/1s)"` + LimitRate float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"` driver.RootID } diff --git a/drivers/115_share/meta.go b/drivers/115_share/meta.go index 3fcc7b92..b3d2cc1f 100644 --- a/drivers/115_share/meta.go +++ b/drivers/115_share/meta.go @@ -10,7 +10,7 @@ type Addition struct { QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"` QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"` PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"` - LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"` + LimitRate float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"` ShareCode string `json:"share_code" type:"text" required:"true" help:"share code of 115 share link"` ReceiveCode string `json:"receive_code" type:"text" required:"true" help:"receive code of 115 share link"` driver.RootID From 5ecf5e823c6410766843e47c116a101c15df088d Mon Sep 17 00:00:00 2001 From: "Feng.YJ" <32027253+huiyifyj@users.noreply.github.com> Date: Wed, 25 Dec 2024 21:16:34 +0800 Subject: [PATCH 063/187] fix(webauthn): handle error when removing webauthn credential (#7689) --- server/handles/webauthn.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/handles/webauthn.go b/server/handles/webauthn.go index 1bd1884e..c6a7650c 100644 --- a/server/handles/webauthn.go +++ b/server/handles/webauthn.go @@ -207,6 +207,10 @@ func DeleteAuthnLogin(c *gin.Context) { return } err = db.RemoveAuthn(user, req.ID) + if err != nil { + common.ErrorResp(c, err, 400) + return + } err = op.DelUserCache(user.Username) if err 
!= nil { common.ErrorResp(c, err, 400) From 48916cdedff1bcff2a0e05c8e672246169e3c4ee Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Wed, 25 Dec 2024 21:17:58 +0800 Subject: [PATCH 064/187] fix(permission): enhance the strictness of permissions (#7705 close #7680) * fix(permission): enhance the strictness of permissions * fix: add initial permissions to admin --- internal/bootstrap/data/user.go | 13 +++++----- internal/model/user.go | 42 +++++++++++++++++---------------- server/webdav.go | 28 ++++++++++++++++------ server/webdav/file.go | 7 ++++++ 4 files changed, 57 insertions(+), 33 deletions(-) diff --git a/internal/bootstrap/data/user.go b/internal/bootstrap/data/user.go index 3b71e498..37cba7a5 100644 --- a/internal/bootstrap/data/user.go +++ b/internal/bootstrap/data/user.go @@ -26,12 +26,13 @@ func initUser() { if errors.Is(err, gorm.ErrRecordNotFound) { salt := random.String(16) admin = &model.User{ - Username: "admin", - Salt: salt, - PwdHash: model.TwoHashPwd(adminPassword, salt), - Role: model.ADMIN, - BasePath: "/", - Authn: "[]", + Username: "admin", + Salt: salt, + PwdHash: model.TwoHashPwd(adminPassword, salt), + Role: model.ADMIN, + BasePath: "/", + Authn: "[]", + Permission: 0xFF, // 0(can see hidden) - 7(can remove) } if err := op.CreateUser(admin); err != nil { panic(err) diff --git a/internal/model/user.go b/internal/model/user.go index b4e876a4..f75fc687 100644 --- a/internal/model/user.go +++ b/internal/model/user.go @@ -32,16 +32,18 @@ type User struct { Role int `json:"role"` // user's role Disabled bool `json:"disabled"` // Determine permissions by bit - // 0: can see hidden files - // 1: can access without password - // 2: can add offline download tasks - // 3: can mkdir and upload - // 4: can rename - // 5: can move - // 6: can copy - // 7: can remove - // 8: webdav read - // 9: webdav write + // 0: can see hidden files + // 1: can access without password + // 2: can add offline download tasks + // 3: can mkdir and 
upload + // 4: can rename + // 5: can move + // 6: can copy + // 7: can remove + // 8: webdav read + // 9: webdav write + // 10: ftp/sftp login and read + // 11: ftp/sftp write Permission int32 `json:"permission"` OtpSecret string `json:"-"` SsoID string `json:"sso_id"` // unique by sso platform @@ -78,43 +80,43 @@ func (u *User) SetPassword(pwd string) *User { } func (u *User) CanSeeHides() bool { - return u.IsAdmin() || u.Permission&1 == 1 + return u.Permission&1 == 1 } func (u *User) CanAccessWithoutPassword() bool { - return u.IsAdmin() || (u.Permission>>1)&1 == 1 + return (u.Permission>>1)&1 == 1 } func (u *User) CanAddOfflineDownloadTasks() bool { - return u.IsAdmin() || (u.Permission>>2)&1 == 1 + return (u.Permission>>2)&1 == 1 } func (u *User) CanWrite() bool { - return u.IsAdmin() || (u.Permission>>3)&1 == 1 + return (u.Permission>>3)&1 == 1 } func (u *User) CanRename() bool { - return u.IsAdmin() || (u.Permission>>4)&1 == 1 + return (u.Permission>>4)&1 == 1 } func (u *User) CanMove() bool { - return u.IsAdmin() || (u.Permission>>5)&1 == 1 + return (u.Permission>>5)&1 == 1 } func (u *User) CanCopy() bool { - return u.IsAdmin() || (u.Permission>>6)&1 == 1 + return (u.Permission>>6)&1 == 1 } func (u *User) CanRemove() bool { - return u.IsAdmin() || (u.Permission>>7)&1 == 1 + return (u.Permission>>7)&1 == 1 } func (u *User) CanWebdavRead() bool { - return u.IsAdmin() || (u.Permission>>8)&1 == 1 + return (u.Permission>>8)&1 == 1 } func (u *User) CanWebdavManage() bool { - return u.IsAdmin() || (u.Permission>>9)&1 == 1 + return (u.Permission>>9)&1 == 1 } func (u *User) CanFTPAccess() bool { diff --git a/server/webdav.go b/server/webdav.go index 2b5c9618..cdfdce7d 100644 --- a/server/webdav.go +++ b/server/webdav.go @@ -11,7 +11,6 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/setting" - "github.com/alist-org/alist/v3/pkg/utils" 
"github.com/alist-org/alist/v3/server/webdav" "github.com/gin-gonic/gin" log "github.com/sirupsen/logrus" @@ -99,12 +98,27 @@ func WebDAVAuth(c *gin.Context) { c.Abort() return } - if !user.CanWebdavManage() && utils.SliceContains([]string{"PUT", "DELETE", "PROPPATCH", "MKCOL", "COPY", "MOVE"}, c.Request.Method) { - if c.Request.Method == "OPTIONS" { - c.Set("user", guest) - c.Next() - return - } + if (c.Request.Method == "PUT" || c.Request.Method == "MKCOL") && (!user.CanWebdavManage() || !user.CanWrite()) { + c.Status(http.StatusForbidden) + c.Abort() + return + } + if c.Request.Method == "MOVE" && (!user.CanWebdavManage() || (!user.CanMove() && !user.CanRename())) { + c.Status(http.StatusForbidden) + c.Abort() + return + } + if c.Request.Method == "COPY" && (!user.CanWebdavManage() || !user.CanCopy()) { + c.Status(http.StatusForbidden) + c.Abort() + return + } + if c.Request.Method == "DELETE" && (!user.CanWebdavManage() || !user.CanRemove()) { + c.Status(http.StatusForbidden) + c.Abort() + return + } + if c.Request.Method == "PROPPATCH" && !user.CanWebdavManage() { c.Status(http.StatusForbidden) c.Abort() return diff --git a/server/webdav/file.go b/server/webdav/file.go index 01e96f7d..ac8f5c1c 100644 --- a/server/webdav/file.go +++ b/server/webdav/file.go @@ -33,6 +33,13 @@ func moveFiles(ctx context.Context, src, dst string, overwrite bool) (status int dstDir := path.Dir(dst) srcName := path.Base(src) dstName := path.Base(dst) + user := ctx.Value("user").(*model.User) + if srcDir != dstDir && !user.CanMove() { + return http.StatusForbidden, nil + } + if srcName != dstName && !user.CanRename() { + return http.StatusForbidden, nil + } if srcDir == dstDir { err = fs.Rename(ctx, src, dstName) } else { From 42243b1517d32e4ee4fa7bc6cad69b5e9bb2fdfa Mon Sep 17 00:00:00 2001 From: Jealous Date: Wed, 25 Dec 2024 21:23:58 +0800 Subject: [PATCH 065/187] feat(thunder): add offline download tool (#7673) * feat(thunder): add offline download tool * fix(thunder): improve 
error handling and parse file size in status response --------- Co-authored-by: Andy Hsu --- drivers/thunder/driver.go | 61 +++++++++ drivers/thunder/types.go | 47 +++++++ drivers/thunder/util.go | 1 + internal/offline_download/all.go | 1 + internal/offline_download/thunder/thunder.go | 126 +++++++++++++++++++ internal/offline_download/thunder/util.go | 42 +++++++ internal/offline_download/tool/add.go | 4 + internal/offline_download/tool/download.go | 6 + 8 files changed, 288 insertions(+) create mode 100644 internal/offline_download/thunder/thunder.go create mode 100644 internal/offline_download/thunder/util.go diff --git a/drivers/thunder/driver.go b/drivers/thunder/driver.go index 9ba5dd82..8403f261 100644 --- a/drivers/thunder/driver.go +++ b/drivers/thunder/driver.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "strconv" "strings" "github.com/alist-org/alist/v3/drivers/base" @@ -522,3 +523,63 @@ func (xc *XunLeiCommon) IsLogin() bool { _, err := xc.Request(XLUSER_API_URL+"/user/me", http.MethodGet, nil, nil) return err == nil } + +// 离线下载文件 +func (xc *XunLeiCommon) OfflineDownload(ctx context.Context, fileUrl string, parentDir model.Obj, fileName string) (*OfflineTask, error) { + var resp OfflineDownloadResp + _, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { + r.SetContext(ctx) + r.SetBody(&base.Json{ + "kind": FILE, + "name": fileName, + "parent_id": parentDir.GetID(), + "upload_type": UPLOAD_TYPE_URL, + "url": base.Json{ + "url": fileUrl, + }, + }) + }, &resp) + + if err != nil { + return nil, err + } + + return &resp.Task, err +} + +/* +获取离线下载任务列表 +*/ +func (xc *XunLeiCommon) OfflineList(ctx context.Context, nextPageToken string) ([]OfflineTask, error) { + res := make([]OfflineTask, 0) + + var resp OfflineListResp + _, err := xc.Request(TASK_API_URL, http.MethodGet, func(req *resty.Request) { + req.SetContext(ctx). 
+ SetQueryParams(map[string]string{ + "type": "offline", + "limit": "10000", + "page_token": nextPageToken, + }) + }, &resp) + + if err != nil { + return nil, fmt.Errorf("failed to get offline list: %w", err) + } + res = append(res, resp.Tasks...) + return res, nil +} + +func (xc *XunLeiCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string, deleteFiles bool) error { + _, err := xc.Request(TASK_API_URL, http.MethodDelete, func(req *resty.Request) { + req.SetContext(ctx). + SetQueryParams(map[string]string{ + "task_ids": strings.Join(taskIDs, ","), + "delete_files": strconv.FormatBool(deleteFiles), + }) + }, nil) + if err != nil { + return fmt.Errorf("failed to delete tasks %v: %w", taskIDs, err) + } + return nil +} diff --git a/drivers/thunder/types.go b/drivers/thunder/types.go index 7c223673..b7355b2a 100644 --- a/drivers/thunder/types.go +++ b/drivers/thunder/types.go @@ -204,3 +204,50 @@ type UploadTaskResponse struct { File Files `json:"file"` } + +// 添加离线下载响应 +type OfflineDownloadResp struct { + File *string `json:"file"` + Task OfflineTask `json:"task"` + UploadType string `json:"upload_type"` + URL struct { + Kind string `json:"kind"` + } `json:"url"` +} + +// 离线下载列表 +type OfflineListResp struct { + ExpiresIn int64 `json:"expires_in"` + NextPageToken string `json:"next_page_token"` + Tasks []OfflineTask `json:"tasks"` +} + +// offlineTask +type OfflineTask struct { + Callback string `json:"callback"` + CreatedTime string `json:"created_time"` + FileID string `json:"file_id"` + FileName string `json:"file_name"` + FileSize string `json:"file_size"` + IconLink string `json:"icon_link"` + ID string `json:"id"` + Kind string `json:"kind"` + Message string `json:"message"` + Name string `json:"name"` + Params Params `json:"params"` + Phase string `json:"phase"` // PHASE_TYPE_RUNNING, PHASE_TYPE_ERROR, PHASE_TYPE_COMPLETE, PHASE_TYPE_PENDING + Progress int64 `json:"progress"` + Space string `json:"space"` + StatusSize int64 `json:"status_size"` + 
Statuses []string `json:"statuses"` + ThirdTaskID string `json:"third_task_id"` + Type string `json:"type"` + UpdatedTime string `json:"updated_time"` + UserID string `json:"user_id"` +} + +type Params struct { + FolderType string `json:"folder_type"` + PredictSpeed string `json:"predict_speed"` + PredictType string `json:"predict_type"` +} diff --git a/drivers/thunder/util.go b/drivers/thunder/util.go index 3ec8db58..f509e6b2 100644 --- a/drivers/thunder/util.go +++ b/drivers/thunder/util.go @@ -17,6 +17,7 @@ import ( const ( API_URL = "https://api-pan.xunlei.com/drive/v1" FILE_API_URL = API_URL + "/files" + TASK_API_URL = API_URL + "/tasks" XLUSER_API_URL = "https://xluser-ssl.xunlei.com/v1" ) diff --git a/internal/offline_download/all.go b/internal/offline_download/all.go index 6682155d..3d0c7c73 100644 --- a/internal/offline_download/all.go +++ b/internal/offline_download/all.go @@ -6,5 +6,6 @@ import ( _ "github.com/alist-org/alist/v3/internal/offline_download/http" _ "github.com/alist-org/alist/v3/internal/offline_download/pikpak" _ "github.com/alist-org/alist/v3/internal/offline_download/qbit" + _ "github.com/alist-org/alist/v3/internal/offline_download/thunder" _ "github.com/alist-org/alist/v3/internal/offline_download/transmission" ) diff --git a/internal/offline_download/thunder/thunder.go b/internal/offline_download/thunder/thunder.go new file mode 100644 index 00000000..3ab8b002 --- /dev/null +++ b/internal/offline_download/thunder/thunder.go @@ -0,0 +1,126 @@ +package thunder + +import ( + "context" + "errors" + "fmt" + "strconv" + + "github.com/alist-org/alist/v3/drivers/thunder" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/offline_download/tool" + "github.com/alist-org/alist/v3/internal/op" +) + +type Thunder struct { + refreshTaskCache bool +} + +func (t *Thunder) Name() string { + return "thunder" +} + +func (t *Thunder) Items() []model.SettingItem { + 
return nil +} + +func (t *Thunder) Run(task *tool.DownloadTask) error { + return errs.NotSupport +} + +func (t *Thunder) Init() (string, error) { + t.refreshTaskCache = false + return "ok", nil +} + +func (t *Thunder) IsReady() bool { + return true +} + +func (t *Thunder) AddURL(args *tool.AddUrlArgs) (string, error) { + // 添加新任务刷新缓存 + t.refreshTaskCache = true + // args.TempDir 已经被修改为了 DstDirPath + storage, actualPath, err := op.GetStorageAndActualPath(args.TempDir) + if err != nil { + return "", err + } + thunderDriver, ok := storage.(*thunder.Thunder) + if !ok { + return "", fmt.Errorf("unsupported storage driver for offline download, only Thunder is supported") + } + + ctx := context.Background() + parentDir, err := op.GetUnwrap(ctx, storage, actualPath) + if err != nil { + return "", err + } + + task, err := thunderDriver.OfflineDownload(ctx, args.Url, parentDir, "") + if err != nil { + return "", fmt.Errorf("failed to add offline download task: %w", err) + } + + return task.ID, nil +} + +func (t *Thunder) Remove(task *tool.DownloadTask) error { + storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + if err != nil { + return err + } + thunderDriver, ok := storage.(*thunder.Thunder) + if !ok { + return fmt.Errorf("unsupported storage driver for offline download, only Thunder is supported") + } + ctx := context.Background() + err = thunderDriver.DeleteOfflineTasks(ctx, []string{task.GID}, false) + if err != nil { + return err + } + return nil +} + +func (t *Thunder) Status(task *tool.DownloadTask) (*tool.Status, error) { + storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + if err != nil { + return nil, err + } + thunderDriver, ok := storage.(*thunder.Thunder) + if !ok { + return nil, fmt.Errorf("unsupported storage driver for offline download, only Thunder is supported") + } + tasks, err := t.GetTasks(thunderDriver) + if err != nil { + return nil, err + } + s := &tool.Status{ + Progress: 0, + NewGID: "", + Completed: false, + Status: 
"the task has been deleted", + Err: nil, + } + for _, t := range tasks { + if t.ID == task.GID { + s.Progress = float64(t.Progress) + s.Status = t.Message + s.Completed = (t.Phase == "PHASE_TYPE_COMPLETE") + s.TotalBytes, err = strconv.ParseInt(t.FileSize, 10, 64) + if err != nil { + s.TotalBytes = 0 + } + if t.Phase == "PHASE_TYPE_ERROR" { + s.Err = errors.New(t.Message) + } + return s, nil + } + } + s.Err = fmt.Errorf("the task has been deleted") + return s, nil +} + +func init() { + tool.Tools.Add(&Thunder{}) +} diff --git a/internal/offline_download/thunder/util.go b/internal/offline_download/thunder/util.go new file mode 100644 index 00000000..ea400f32 --- /dev/null +++ b/internal/offline_download/thunder/util.go @@ -0,0 +1,42 @@ +package thunder + +import ( + "context" + "time" + + "github.com/Xhofe/go-cache" + "github.com/alist-org/alist/v3/drivers/thunder" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/singleflight" +) + +var taskCache = cache.NewMemCache(cache.WithShards[[]thunder.OfflineTask](16)) +var taskG singleflight.Group[[]thunder.OfflineTask] + +func (t *Thunder) GetTasks(thunderDriver *thunder.Thunder) ([]thunder.OfflineTask, error) { + key := op.Key(thunderDriver, "/drive/v1/task") + if !t.refreshTaskCache { + if tasks, ok := taskCache.Get(key); ok { + return tasks, nil + } + } + t.refreshTaskCache = false + tasks, err, _ := taskG.Do(key, func() ([]thunder.OfflineTask, error) { + ctx := context.Background() + tasks, err := thunderDriver.OfflineList(ctx, "") + if err != nil { + return nil, err + } + // 添加缓存 10s + if len(tasks) > 0 { + taskCache.Set(key, tasks, cache.WithEx[[]thunder.OfflineTask](time.Second*10)) + } else { + taskCache.Del(key) + } + return tasks, nil + }) + if err != nil { + return nil, err + } + return tasks, nil +} diff --git a/internal/offline_download/tool/add.go b/internal/offline_download/tool/add.go index 42349e2e..4158051a 100644 --- a/internal/offline_download/tool/add.go +++ 
b/internal/offline_download/tool/add.go @@ -77,6 +77,10 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro tempDir = args.DstDirPath // 防止将下载好的文件删除 deletePolicy = DeleteNever + case "thunder": + tempDir = args.DstDirPath + // 防止将下载好的文件删除 + deletePolicy = DeleteNever } taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed diff --git a/internal/offline_download/tool/download.go b/internal/offline_download/tool/download.go index a0f1a81b..94bf7dbb 100644 --- a/internal/offline_download/tool/download.go +++ b/internal/offline_download/tool/download.go @@ -83,6 +83,9 @@ outer: if t.tool.Name() == "pikpak" { return nil } + if t.tool.Name() == "thunder" { + return nil + } if t.tool.Name() == "115 Cloud" { // hack for 115 <-time.After(time.Second * 1) @@ -161,6 +164,9 @@ func (t *DownloadTask) Complete() error { if t.tool.Name() == "pikpak" { return nil } + if t.tool.Name() == "thunder" { + return nil + } if t.tool.Name() == "115 Cloud" { return nil } From 5994c17b4efca630d8ecec88475f2f243781d615 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Mon, 30 Dec 2024 22:48:33 +0800 Subject: [PATCH 066/187] feat(patch): upgrade patch module (#7738) * feat(patch): upgrade patch module * chore(patch): add docs * fix(patch): skip and rewrite invalid last launched version * fix(patch): turn two functions into patches --- cmd/common.go | 1 + internal/bootstrap/config.go | 6 ++ internal/bootstrap/data/user.go | 35 ---------- internal/bootstrap/patch.go | 67 +++++++++++++++++++ internal/bootstrap/patch/all.go | 35 ++++++++++ .../bootstrap/patch/v3_24_0/hash_password.go | 26 +++++++ .../bootstrap/patch/v3_32_0/update_authn.go | 25 +++++++ .../patch/v3_41_0/grant_permission.go | 22 ++++++ internal/conf/config.go | 2 + 9 files changed, 184 insertions(+), 35 deletions(-) create mode 100644 internal/bootstrap/patch.go create mode 100644 internal/bootstrap/patch/all.go create mode 100644 
internal/bootstrap/patch/v3_24_0/hash_password.go create mode 100644 internal/bootstrap/patch/v3_32_0/update_authn.go create mode 100644 internal/bootstrap/patch/v3_41_0/grant_permission.go diff --git a/cmd/common.go b/cmd/common.go index fabc3a90..beb558f5 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -19,6 +19,7 @@ func Init() { bootstrap.InitDB() data.InitData() bootstrap.InitIndex() + bootstrap.InitUpgradePatch() } func Release() { diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go index 27174c23..a44c7350 100644 --- a/internal/bootstrap/config.go +++ b/internal/bootstrap/config.go @@ -34,6 +34,8 @@ func InitConfig() { log.Fatalf("failed to create config file: %+v", err) } conf.Conf = conf.DefaultConfig() + LastLaunchedVersion = conf.Version + conf.Conf.LastLaunchedVersion = conf.Version if !utils.WriteJsonToFile(configPath, conf.Conf) { log.Fatalf("failed to create default config file") } @@ -47,6 +49,10 @@ func InitConfig() { if err != nil { log.Fatalf("load config error: %+v", err) } + LastLaunchedVersion = conf.Conf.LastLaunchedVersion + if conf.Version != "dev" || LastLaunchedVersion == "" { + conf.Conf.LastLaunchedVersion = conf.Version + } // update config.json struct confBody, err := utils.Json.MarshalIndent(conf.Conf, "", " ") if err != nil { diff --git a/internal/bootstrap/data/user.go b/internal/bootstrap/data/user.go index 37cba7a5..5b596a85 100644 --- a/internal/bootstrap/data/user.go +++ b/internal/bootstrap/data/user.go @@ -64,39 +64,4 @@ func initUser() { utils.Log.Fatalf("[init user] Failed to get guest user: %v", err) } } - hashPwdForOldVersion() - updateAuthnForOldVersion() -} - -func hashPwdForOldVersion() { - users, _, err := op.GetUsers(1, -1) - if err != nil { - utils.Log.Fatalf("[hash pwd for old version] failed get users: %v", err) - } - for i := range users { - user := users[i] - if user.PwdHash == "" { - user.SetPassword(user.Password) - user.Password = "" - if err := db.UpdateUser(&user); err != nil { - 
utils.Log.Fatalf("[hash pwd for old version] failed update user: %v", err) - } - } - } -} - -func updateAuthnForOldVersion() { - users, _, err := op.GetUsers(1, -1) - if err != nil { - utils.Log.Fatalf("[update authn for old version] failed get users: %v", err) - } - for i := range users { - user := users[i] - if user.Authn == "" { - user.Authn = "[]" - if err := db.UpdateUser(&user); err != nil { - utils.Log.Fatalf("[update authn for old version] failed update user: %v", err) - } - } - } } diff --git a/internal/bootstrap/patch.go b/internal/bootstrap/patch.go new file mode 100644 index 00000000..8dc3ed02 --- /dev/null +++ b/internal/bootstrap/patch.go @@ -0,0 +1,67 @@ +package bootstrap + +import ( + "fmt" + "github.com/alist-org/alist/v3/internal/bootstrap/patch" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/pkg/utils" +) + +var LastLaunchedVersion = "" + +func safeCall(v string, i int, f func()) { + defer func() { + if r := recover(); r != nil { + utils.Log.Errorf("Recovered from patch (version: %s, index: %d) panic: %v", v, i, r) + } + }() + + f() +} + +func getVersion(v string) (major, minor, patchNum int, err error) { + _, err = fmt.Sscanf(v, "v%d.%d.%d", &major, &minor, &patchNum) + return major, minor, patchNum, err +} + +func compareVersion(majorA, minorA, patchNumA, majorB, minorB, patchNumB int) bool { + if majorA != majorB { + return majorA > majorB + } + if minorA != minorB { + return minorA > minorB + } + if patchNumA != patchNumB { + return patchNumA > patchNumB + } + return true +} + +func InitUpgradePatch() { + if conf.Version == "dev" { + return + } + if LastLaunchedVersion == conf.Version { + return + } + if LastLaunchedVersion == "" { + LastLaunchedVersion = "v0.0.0" + } + major, minor, patchNum, err := getVersion(LastLaunchedVersion) + if err != nil { + utils.Log.Warnf("Failed to parse last launched version %s: %v, skipping all patches and rewrite last launched version", LastLaunchedVersion, err) + return + } 
+ for _, vp := range patch.UpgradePatches { + ma, mi, pn, err := getVersion(vp.Version) + if err != nil { + utils.Log.Errorf("Skip invalid version %s patches: %v", vp.Version, err) + continue + } + if compareVersion(ma, mi, pn, major, minor, patchNum) { + for i, p := range vp.Patches { + safeCall(vp.Version, i, p) + } + } + } +} diff --git a/internal/bootstrap/patch/all.go b/internal/bootstrap/patch/all.go new file mode 100644 index 00000000..b363d129 --- /dev/null +++ b/internal/bootstrap/patch/all.go @@ -0,0 +1,35 @@ +package patch + +import ( + "github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_24_0" + "github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_32_0" + "github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_41_0" +) + +type VersionPatches struct { + // Version means if the system is upgraded from Version or an earlier one + // to the current version, all patches in Patches will be executed. + Version string + Patches []func() +} + +var UpgradePatches = []VersionPatches{ + { + Version: "v3.24.0", + Patches: []func(){ + v3_24_0.HashPwdForOldVersion, + }, + }, + { + Version: "v3.32.0", + Patches: []func(){ + v3_32_0.UpdateAuthnForOldVersion, + }, + }, + { + Version: "v3.41.0", + Patches: []func(){ + v3_41_0.GrantAdminPermissions, + }, + }, +} diff --git a/internal/bootstrap/patch/v3_24_0/hash_password.go b/internal/bootstrap/patch/v3_24_0/hash_password.go new file mode 100644 index 00000000..2adb640d --- /dev/null +++ b/internal/bootstrap/patch/v3_24_0/hash_password.go @@ -0,0 +1,26 @@ +package v3_24_0 + +import ( + "github.com/alist-org/alist/v3/internal/db" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/utils" +) + +// HashPwdForOldVersion encode passwords using SHA256 +// First published: 75acbcc perf: sha256 for user's password (close #3552) by Andy Hsu +func HashPwdForOldVersion() { + users, _, err := op.GetUsers(1, -1) + if err != nil { + utils.Log.Fatalf("[hash pwd for old version] failed get 
users: %v", err) + } + for i := range users { + user := users[i] + if user.PwdHash == "" { + user.SetPassword(user.Password) + user.Password = "" + if err := db.UpdateUser(&user); err != nil { + utils.Log.Fatalf("[hash pwd for old version] failed update user: %v", err) + } + } + } +} diff --git a/internal/bootstrap/patch/v3_32_0/update_authn.go b/internal/bootstrap/patch/v3_32_0/update_authn.go new file mode 100644 index 00000000..92a594fd --- /dev/null +++ b/internal/bootstrap/patch/v3_32_0/update_authn.go @@ -0,0 +1,25 @@ +package v3_32_0 + +import ( + "github.com/alist-org/alist/v3/internal/db" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/utils" +) + +// UpdateAuthnForOldVersion updates users' authn +// First published: bdfc159 fix: webauthn logspam (#6181) by itsHenry +func UpdateAuthnForOldVersion() { + users, _, err := op.GetUsers(1, -1) + if err != nil { + utils.Log.Fatalf("[update authn for old version] failed get users: %v", err) + } + for i := range users { + user := users[i] + if user.Authn == "" { + user.Authn = "[]" + if err := db.UpdateUser(&user); err != nil { + utils.Log.Fatalf("[update authn for old version] failed update user: %v", err) + } + } + } +} diff --git a/internal/bootstrap/patch/v3_41_0/grant_permission.go b/internal/bootstrap/patch/v3_41_0/grant_permission.go new file mode 100644 index 00000000..d658d184 --- /dev/null +++ b/internal/bootstrap/patch/v3_41_0/grant_permission.go @@ -0,0 +1,22 @@ +package v3_41_0 + +import ( + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/utils" +) + +// GrantAdminPermissions gives admin Permission 0(can see hidden) - 9(webdav manage) +// This patch is written to help users upgrading from older version better adapt to PR AlistGo/alist#7705. 
+func GrantAdminPermissions() { + admin, err := op.GetAdmin() + if err != nil { + utils.Log.Errorf("Cannot grant permissions to admin: %v", err) + } + if (admin.Permission & 0x3FF) == 0 { + admin.Permission |= 0x3FF + } + err = op.UpdateUser(admin) + if err != nil { + utils.Log.Errorf("Cannot grant permissions to admin: %v", err) + } +} diff --git a/internal/conf/config.go b/internal/conf/config.go index a9b38242..d015cda0 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -110,6 +110,7 @@ type Config struct { S3 S3 `json:"s3" envPrefix:"S3_"` FTP FTP `json:"ftp" envPrefix:"FTP_"` SFTP SFTP `json:"sftp" envPrefix:"SFTP_"` + LastLaunchedVersion string `json:"last_launched_version"` } func DefaultConfig() *Config { @@ -195,5 +196,6 @@ func DefaultConfig() *Config { Enable: false, Listen: ":5222", }, + LastLaunchedVersion: "", } } From 365fc40dfec03411d3d8231090495761e012c525 Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Mon, 30 Dec 2024 22:49:18 +0800 Subject: [PATCH 067/187] fix: static page to limit request method (#7745 close #7667) --- server/static/static.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/static/static.go b/server/static/static.go index ec16014c..d5d6ff68 100644 --- a/server/static/static.go +++ b/server/static/static.go @@ -102,6 +102,10 @@ func Static(r *gin.RouterGroup, noRoute func(handlers ...gin.HandlerFunc)) { } noRoute(func(c *gin.Context) { + if c.Request.Method != "GET" && c.Request.Method != "POST" { + c.Status(405) + return + } c.Header("Content-Type", "text/html") c.Status(200) if strings.HasPrefix(c.Request.URL.Path, "/@manage") { From 4dce53d72b0d201ce7cf0e88afebbd7b843f6756 Mon Sep 17 00:00:00 2001 From: Mmx <36563672+Mmx233@users.noreply.github.com> Date: Mon, 30 Dec 2024 22:51:05 +0800 Subject: [PATCH 068/187] feat(docker release): improve aria2 image, add aio image (#7750) * build: add argument INSTALL_ARIA2 to dockerfile * feat: run aria2 in main 
entrypoint * feat(ci): environment matrix for docker release * improve(ci): allow overwrite artifacts in docker release * fix(ci): permission of alist binary in docker; entrypoint logic * improve(aria2): move aria2 data to /opt/aria2; fix permission issues References: https://github.com/AlistGo/with_aria2/pull/13 Co-authored-by: GoodbyeNJN * fix(ci): aio image is not taking effect * fix(build): tar command in aria2 installation process (cherry picked from commit 647285408354807bae64df6a20fefb696ff787de) --------- Co-authored-by: GoodbyeNJN --- .github/workflows/release_docker.yml | 116 ++++++++++++++------------- Dockerfile | 17 +++- Dockerfile.ci | 17 +++- entrypoint.sh | 12 ++- 4 files changed, 100 insertions(+), 62 deletions(-) diff --git a/.github/workflows/release_docker.yml b/.github/workflows/release_docker.yml index a2dd2dd7..0f559a3f 100644 --- a/.github/workflows/release_docker.yml +++ b/.github/workflows/release_docker.yml @@ -5,9 +5,16 @@ on: tags: - 'v*' +env: + IMAGE_REGISTRY: 'xhofe/alist' + REGISTRY_USERNAME: 'xhofe' + REGISTRY_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }} + ARTIFACT_NAME: 'binaries_docker_release' + RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64' + jobs: - release_docker: - name: Release Docker + build_binary: + name: Build Binaries for Docker Release runs-on: ubuntu-latest steps: - name: Checkout @@ -31,11 +38,45 @@ jobs: - name: Build go binary run: bash build.sh release docker-multiplatform - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 + - name: Upload artifacts + uses: actions/upload-artifact@v4 with: - images: xhofe/alist + name: ${{ env.ARTIFACT_NAME }} + overwrite: true + path: | + build/ + !build/*.tgz + !build/musl-libs/** + + release_docker: + needs: build_binary + name: Release Docker image + runs-on: ubuntu-latest + strategy: + matrix: + image: ["latest", "ffmpeg", "aria2", "aio"] + include: + - image: "latest" + build_arg: "" + 
tag_favor: "" + - image: "ffmpeg" + build_arg: INSTALL_FFMPEG=true + tag_favor: "suffix=-ffmpeg,onlatest=true" + - image: "aria2" + build_arg: INSTALL_ARIA2=true + tag_favor: "suffix=-aria2,onlatest=true" + - image: "aio" + build_arg: | + INSTALL_FFMPEG=true + INSTALL_ARIA2=true + tag_favor: "suffix=-aio,onlatest=true" + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: actions/download-artifact@v4 + with: + name: ${{ env.ARTIFACT_NAME }} + path: 'build/' - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -46,8 +87,17 @@ jobs: - name: Login to DockerHub uses: docker/login-action@v3 with: - username: xhofe - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ env.REGISTRY_USERNAME }} + password: ${{ env.REGISTRY_PASSWORD }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.IMAGE_REGISTRY }} + flavor: | + latest=true + ${{ matrix.tag_favor }} - name: Build and push id: docker_build @@ -56,53 +106,7 @@ jobs: context: . file: Dockerfile.ci push: true + build-args: ${{ matrix.build_arg }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64 - - - name: Docker meta with ffmpeg - id: meta-ffmpeg - uses: docker/metadata-action@v5 - with: - images: xhofe/alist - flavor: | - latest=true - suffix=-ffmpeg,onlatest=true - - - name: Build and push with ffmpeg - id: docker_build_ffmpeg - uses: docker/build-push-action@v6 - with: - context: . 
- file: Dockerfile.ci - push: true - tags: ${{ steps.meta-ffmpeg.outputs.tags }} - labels: ${{ steps.meta-ffmpeg.outputs.labels }} - build-args: INSTALL_FFMPEG=true - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64 - - release_docker_with_aria2: - needs: release_docker - name: Release docker with aria2 - runs-on: ubuntu-latest - steps: - - name: Checkout repo - uses: actions/checkout@v4 - with: - repository: alist-org/with_aria2 - ref: main - persist-credentials: false - fetch-depth: 0 - - - name: Add tag - run: | - git config --local user.email "bot@nn.ci" - git config --local user.name "IlaBot" - git tag -a ${{ github.ref_name }} -m "release ${{ github.ref_name }}" - - - name: Push tags - uses: ad-m/github-push-action@master - with: - github_token: ${{ secrets.MY_TOKEN }} - branch: main - repository: alist-org/with_aria2 + platforms: ${{ env.RELEASE_PLATFORMS }} \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 74fa2165..0e2ee96f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,7 @@ RUN bash build.sh release docker FROM alpine:edge ARG INSTALL_FFMPEG=false +ARG INSTALL_ARIA2=false LABEL MAINTAINER="i@nn.ci" WORKDIR /opt/alist/ @@ -18,13 +19,25 @@ RUN apk update && \ apk upgrade --no-cache && \ apk add --no-cache bash ca-certificates su-exec tzdata; \ [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \ + [ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \ + mkdir -p /opt/aria2/.aria2 && \ + wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \ + tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \ + sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \ + sed -i 
's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \ + touch /opt/aria2/.aria2/aria2.session && \ + /opt/aria2/.aria2/tracker.sh ; \ rm -rf /var/cache/apk/* COPY --from=builder /app/bin/alist ./ COPY entrypoint.sh /entrypoint.sh -RUN chmod +x /entrypoint.sh && /entrypoint.sh version +RUN chmod +x /opt/alist/alist && \ + chmod +x /entrypoint.sh && /entrypoint.sh version -ENV PUID=0 PGID=0 UMASK=022 +ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2} VOLUME /opt/alist/data/ EXPOSE 5244 5245 CMD [ "/entrypoint.sh" ] \ No newline at end of file diff --git a/Dockerfile.ci b/Dockerfile.ci index 3f437f16..25d502a9 100644 --- a/Dockerfile.ci +++ b/Dockerfile.ci @@ -2,6 +2,7 @@ FROM alpine:edge ARG TARGETPLATFORM ARG INSTALL_FFMPEG=false +ARG INSTALL_ARIA2=false LABEL MAINTAINER="i@nn.ci" WORKDIR /opt/alist/ @@ -10,13 +11,25 @@ RUN apk update && \ apk upgrade --no-cache && \ apk add --no-cache bash ca-certificates su-exec tzdata; \ [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \ + [ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \ + mkdir -p /opt/aria2/.aria2 && \ + wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \ + tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \ + sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \ + sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \ + touch /opt/aria2/.aria2/aria2.session && \ + /opt/aria2/.aria2/tracker.sh ; \ rm -rf /var/cache/apk/* COPY /build/${TARGETPLATFORM}/alist ./ COPY entrypoint.sh /entrypoint.sh -RUN chmod +x /entrypoint.sh && /entrypoint.sh version 
+RUN chmod +x /opt/alist/alist && \ + chmod +x /entrypoint.sh && /entrypoint.sh version -ENV PUID=0 PGID=0 UMASK=022 +ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2} VOLUME /opt/alist/data/ EXPOSE 5244 5245 CMD [ "/entrypoint.sh" ] \ No newline at end of file diff --git a/entrypoint.sh b/entrypoint.sh index a0d80835..28a18d7d 100644 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -1,11 +1,19 @@ #!/bin/bash -chown -R ${PUID}:${PGID} /opt/alist/ - umask ${UMASK} if [ "$1" = "version" ]; then ./alist version else + if [ "$RUN_ARIA2" = "true" ]; then + chown -R ${PUID}:${PGID} /opt/aria2/ + exec su-exec ${PUID}:${PGID} nohup aria2c \ + --enable-rpc \ + --rpc-allow-origin-all \ + --conf-path=/opt/aria2/.aria2/aria2.conf \ + >/dev/null 2>&1 & + fi + + chown -R ${PUID}:${PGID} /opt/alist/ exec su-exec ${PUID}:${PGID} ./alist server --no-prefix fi \ No newline at end of file From 040dc14ee626491ccbd9db29b06aa2713c0ae758 Mon Sep 17 00:00:00 2001 From: Sakana Date: Mon, 30 Dec 2024 22:51:39 +0800 Subject: [PATCH 069/187] fix(lenovonas_share): stoken expire (#7727) --- drivers/lenovonas_share/driver.go | 44 ++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/drivers/lenovonas_share/driver.go b/drivers/lenovonas_share/driver.go index 12e85143..684a2dda 100644 --- a/drivers/lenovonas_share/driver.go +++ b/drivers/lenovonas_share/driver.go @@ -3,6 +3,7 @@ package LenovoNasShare import ( "context" "net/http" + "time" "github.com/go-resty/resty/v2" @@ -15,7 +16,8 @@ import ( type LenovoNasShare struct { model.Storage Addition - stoken string + stoken string + expireAt int64 } func (d *LenovoNasShare) Config() driver.Config { @@ -27,20 +29,9 @@ func (d *LenovoNasShare) GetAddition() driver.Additional { } func (d *LenovoNasShare) Init(ctx context.Context) error { - if d.Host == "" { - d.Host = "https://siot-share.lenovo.com.cn" - } - query := map[string]string{ - "code": d.ShareId, - "password": d.SharePwd, - } - resp, err := 
d.request(d.Host+"/oneproxy/api/share/v1/access", http.MethodGet, func(req *resty.Request) { - req.SetQueryParams(query) - }, nil) - if err != nil { + if err := d.getStoken(); err != nil { return err } - d.stoken = utils.Json.Get(resp, "data", "stoken").ToString() return nil } @@ -49,6 +40,7 @@ func (d *LenovoNasShare) Drop(ctx context.Context) error { } func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + d.checkStoken() // 检查stoken是否过期 files := make([]File, 0) var resp Files @@ -71,7 +63,33 @@ func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.Lis }) } +func (d *LenovoNasShare) checkStoken() { // 检查stoken是否过期 + if d.expireAt < time.Now().Unix() { + d.getStoken() + } +} + +func (d *LenovoNasShare) getStoken() error { // 获取stoken + if d.Host == "" { + d.Host = "https://siot-share.lenovo.com.cn" + } + query := map[string]string{ + "code": d.ShareId, + "password": d.SharePwd, + } + resp, err := d.request(d.Host+"/oneproxy/api/share/v1/access", http.MethodGet, func(req *resty.Request) { + req.SetQueryParams(query) + }, nil) + if err != nil { + return err + } + d.stoken = utils.Json.Get(resp, "data", "stoken").ToString() + d.expireAt = utils.Json.Get(resp, "data", "expires_in").ToInt64() + time.Now().Unix() - 60 + return nil +} + func (d *LenovoNasShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + d.checkStoken() // 检查stoken是否过期 query := map[string]string{ "code": d.ShareId, "stoken": d.stoken, From ed149be84b75f03147f8c05e6a40a5daf45570d9 Mon Sep 17 00:00:00 2001 From: Jealous Date: Mon, 30 Dec 2024 22:52:55 +0800 Subject: [PATCH 070/187] feat(index): add `disable index` option for storages (#7730) --- internal/model/storage.go | 5 ++++- internal/op/driver.go | 6 ++++++ internal/search/build.go | 5 +++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/internal/model/storage.go b/internal/model/storage.go index 
14bcf45f..e3c7e1f9 100644 --- a/internal/model/storage.go +++ b/internal/model/storage.go @@ -1,6 +1,8 @@ package model -import "time" +import ( + "time" +) type Storage struct { ID uint `json:"id" gorm:"primaryKey"` // unique key @@ -13,6 +15,7 @@ type Storage struct { Remark string `json:"remark"` Modified time.Time `json:"modified"` Disabled bool `json:"disabled"` // if disabled + DisableIndex bool `json:"disable_index"` EnableSign bool `json:"enable_sign"` Sort Proxy diff --git a/internal/op/driver.go b/internal/op/driver.go index 4f10e8e2..41b6f6d4 100644 --- a/internal/op/driver.go +++ b/internal/op/driver.go @@ -133,6 +133,12 @@ func getMainItems(config driver.Config) []driver.Item { Type: conf.TypeSelect, Options: "front,back", }) + items = append(items, driver.Item{ + Name: "disable_index", + Type: conf.TypeBool, + Default: "false", + Required: true, + }) items = append(items, driver.Item{ Name: "enable_sign", Type: conf.TypeBool, diff --git a/internal/search/build.go b/internal/search/build.go index 9865b298..2888c1f4 100644 --- a/internal/search/build.go +++ b/internal/search/build.go @@ -157,6 +157,11 @@ func BuildIndex(ctx context.Context, indexPaths, ignorePaths []string, maxDepth return filepath.SkipDir } } + if storage, _, err := op.GetStorageAndActualPath(indexPath); err == nil { + if storage.GetStorage().DisableIndex { + return filepath.SkipDir + } + } // ignore root if indexPath == "/" { return nil From aa1082a56c4d17377667daf9b888ae9270d2c1be Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Mon, 30 Dec 2024 22:54:37 +0800 Subject: [PATCH 071/187] feat(sftp-server): do not generate host key until first enabled (#7734) --- cmd/common.go | 1 - internal/conf/var.go | 3 -- server/sftp.go | 6 ++-- server/{ftp => sftp}/const.go | 2 +- .../ssh.go => server/sftp/hostkey.go | 12 ++++--- server/{ftp => sftp}/sftp.go | 33 ++++++++++--------- 6 files changed, 30 insertions(+), 27 deletions(-) rename server/{ftp => sftp}/const.go (94%) 
rename internal/bootstrap/ssh.go => server/sftp/hostkey.go (94%) rename server/{ftp => sftp}/sftp.go (64%) diff --git a/cmd/common.go b/cmd/common.go index beb558f5..47a25f3f 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -15,7 +15,6 @@ import ( func Init() { bootstrap.InitConfig() bootstrap.Log() - bootstrap.InitHostKey() bootstrap.InitDB() data.InitData() bootstrap.InitIndex() diff --git a/internal/conf/var.go b/internal/conf/var.go index b7277e41..0a8eb16f 100644 --- a/internal/conf/var.go +++ b/internal/conf/var.go @@ -1,7 +1,6 @@ package conf import ( - "golang.org/x/crypto/ssh" "net/url" "regexp" ) @@ -33,5 +32,3 @@ var ( ManageHtml string IndexHtml string ) - -var SSHSigners []ssh.Signer diff --git a/server/sftp.go b/server/sftp.go index d44046a4..0455c962 100644 --- a/server/sftp.go +++ b/server/sftp.go @@ -9,6 +9,7 @@ import ( "github.com/alist-org/alist/v3/internal/setting" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/ftp" + "github.com/alist-org/alist/v3/server/sftp" "github.com/pkg/errors" "golang.org/x/crypto/ssh" "net/http" @@ -21,6 +22,7 @@ type SftpDriver struct { } func NewSftpDriver() (*SftpDriver, error) { + sftp.InitHostKey() header := &http.Header{} header.Add("User-Agent", setting.GetStr(conf.FTPProxyUserAgent)) return &SftpDriver{ @@ -40,7 +42,7 @@ func (d *SftpDriver) GetConfig() *sftpd.Config { AuthLogCallback: d.AuthLogCallback, BannerCallback: d.GetBanner, } - for _, k := range conf.SSHSigners { + for _, k := range sftp.SSHSigners { serverConfig.AddHostKey(k) } d.config = &sftpd.Config{ @@ -62,7 +64,7 @@ func (d *SftpDriver) GetFileSystem(sc *ssh.ServerConn) (sftpd.FileSystem, error) ctx = context.WithValue(ctx, "meta_pass", "") ctx = context.WithValue(ctx, "client_ip", sc.RemoteAddr().String()) ctx = context.WithValue(ctx, "proxy_header", d.proxyHeader) - return &ftp.SftpDriverAdapter{FtpDriver: ftp.NewAferoAdapter(ctx)}, nil + return &sftp.DriverAdapter{FtpDriver: ftp.NewAferoAdapter(ctx)}, nil } 
func (d *SftpDriver) Close() { diff --git a/server/ftp/const.go b/server/sftp/const.go similarity index 94% rename from server/ftp/const.go rename to server/sftp/const.go index 1fd14e82..58bfe382 100644 --- a/server/ftp/const.go +++ b/server/sftp/const.go @@ -1,4 +1,4 @@ -package ftp +package sftp // From leffss/sftpd const ( diff --git a/internal/bootstrap/ssh.go b/server/sftp/hostkey.go similarity index 94% rename from internal/bootstrap/ssh.go rename to server/sftp/hostkey.go index ec4a07ac..0db103dd 100644 --- a/internal/bootstrap/ssh.go +++ b/server/sftp/hostkey.go @@ -1,4 +1,4 @@ -package bootstrap +package sftp import ( "crypto/rand" @@ -7,14 +7,18 @@ import ( "encoding/pem" "fmt" "github.com/alist-org/alist/v3/cmd/flags" - "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/pkg/utils" "golang.org/x/crypto/ssh" "os" "path/filepath" ) +var SSHSigners []ssh.Signer + func InitHostKey() { + if SSHSigners != nil { + return + } sshPath := filepath.Join(flags.DataDir, "ssh") if !utils.Exists(sshPath) { err := utils.CreateNestedDirectory(sshPath) @@ -23,9 +27,9 @@ func InitHostKey() { return } } - conf.SSHSigners = make([]ssh.Signer, 0, 4) + SSHSigners = make([]ssh.Signer, 0, 4) if rsaKey, ok := LoadOrGenerateRSAHostKey(sshPath); ok { - conf.SSHSigners = append(conf.SSHSigners, rsaKey) + SSHSigners = append(SSHSigners, rsaKey) } // TODO Add keys for other encryption algorithms } diff --git a/server/ftp/sftp.go b/server/sftp/sftp.go similarity index 64% rename from server/ftp/sftp.go rename to server/sftp/sftp.go index 0a11ee18..1ceb3f59 100644 --- a/server/ftp/sftp.go +++ b/server/sftp/sftp.go @@ -1,44 +1,45 @@ -package ftp +package sftp import ( "github.com/KirCute/sftpd-alist" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/server/ftp" "os" ) -type SftpDriverAdapter struct { - FtpDriver *AferoAdapter +type DriverAdapter 
struct { + FtpDriver *ftp.AferoAdapter } -func (s *SftpDriverAdapter) OpenFile(_ string, _ uint32, _ *sftpd.Attr) (sftpd.File, error) { +func (s *DriverAdapter) OpenFile(_ string, _ uint32, _ *sftpd.Attr) (sftpd.File, error) { // See also GetHandle return nil, errs.NotImplement } -func (s *SftpDriverAdapter) OpenDir(_ string) (sftpd.Dir, error) { +func (s *DriverAdapter) OpenDir(_ string) (sftpd.Dir, error) { // See also GetHandle return nil, errs.NotImplement } -func (s *SftpDriverAdapter) Remove(name string) error { +func (s *DriverAdapter) Remove(name string) error { return s.FtpDriver.Remove(name) } -func (s *SftpDriverAdapter) Rename(old, new string, _ uint32) error { +func (s *DriverAdapter) Rename(old, new string, _ uint32) error { return s.FtpDriver.Rename(old, new) } -func (s *SftpDriverAdapter) Mkdir(name string, attr *sftpd.Attr) error { +func (s *DriverAdapter) Mkdir(name string, attr *sftpd.Attr) error { return s.FtpDriver.Mkdir(name, attr.Mode) } -func (s *SftpDriverAdapter) Rmdir(name string) error { +func (s *DriverAdapter) Rmdir(name string) error { return s.Remove(name) } -func (s *SftpDriverAdapter) Stat(name string, _ bool) (*sftpd.Attr, error) { +func (s *DriverAdapter) Stat(name string, _ bool) (*sftpd.Attr, error) { stat, err := s.FtpDriver.Stat(name) if err != nil { return nil, err @@ -46,27 +47,27 @@ func (s *SftpDriverAdapter) Stat(name string, _ bool) (*sftpd.Attr, error) { return fileInfoToSftpAttr(stat), nil } -func (s *SftpDriverAdapter) SetStat(_ string, _ *sftpd.Attr) error { +func (s *DriverAdapter) SetStat(_ string, _ *sftpd.Attr) error { return errs.NotSupport } -func (s *SftpDriverAdapter) ReadLink(_ string) (string, error) { +func (s *DriverAdapter) ReadLink(_ string) (string, error) { return "", errs.NotSupport } -func (s *SftpDriverAdapter) CreateLink(_, _ string, _ uint32) error { +func (s *DriverAdapter) CreateLink(_, _ string, _ uint32) error { return errs.NotSupport } -func (s *SftpDriverAdapter) RealPath(path string) 
(string, error) { +func (s *DriverAdapter) RealPath(path string) (string, error) { return utils.FixAndCleanPath(path), nil } -func (s *SftpDriverAdapter) GetHandle(name string, flags uint32, _ *sftpd.Attr, offset uint64) (sftpd.FileTransfer, error) { +func (s *DriverAdapter) GetHandle(name string, flags uint32, _ *sftpd.Attr, offset uint64) (sftpd.FileTransfer, error) { return s.FtpDriver.GetHandle(name, sftpFlagToOpenMode(flags), int64(offset)) } -func (s *SftpDriverAdapter) ReadDir(name string) ([]sftpd.NamedAttr, error) { +func (s *DriverAdapter) ReadDir(name string) ([]sftpd.NamedAttr, error) { dir, err := s.FtpDriver.ReadDir(name) if err != nil { return nil, err From 6745dcc139dba93eb86fb50ad7849494ef7a1b6b Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Mon, 30 Dec 2024 22:55:09 +0800 Subject: [PATCH 072/187] feat(task): attach creator to `user` of the context (#7729) --- internal/task/base.go | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/internal/task/base.go b/internal/task/base.go index 93f413a7..22b16741 100644 --- a/internal/task/base.go +++ b/internal/task/base.go @@ -1,17 +1,21 @@ package task import ( + "context" "github.com/alist-org/alist/v3/internal/model" "github.com/xhofe/tache" + "sync" "time" ) type TaskExtension struct { tache.Base - Creator *model.User - startTime *time.Time - endTime *time.Time - totalBytes int64 + ctx context.Context + ctxInitMutex sync.Mutex + Creator *model.User + startTime *time.Time + endTime *time.Time + totalBytes int64 } func (t *TaskExtension) SetCreator(creator *model.User) { @@ -51,6 +55,17 @@ func (t *TaskExtension) GetTotalBytes() int64 { return t.totalBytes } +func (t *TaskExtension) Ctx() context.Context { + if t.ctx == nil { + t.ctxInitMutex.Lock() + if t.ctx == nil { + t.ctx = context.WithValue(t.Base.Ctx(), "user", t.Creator) + } + t.ctxInitMutex.Unlock() + } + return t.ctx +} + type TaskExtensionInfo interface { tache.TaskWithInfo 
GetCreator() *model.User From 7fd4ac78515387e5962868aef41acb36ba80064c Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Mon, 30 Dec 2024 22:55:47 +0800 Subject: [PATCH 073/187] fix(139): update familyGetFiles pagination logic (#7748 close #7711) --- drivers/139/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/139/util.go b/drivers/139/util.go index ccb6a912..d0b4d3b4 100644 --- a/drivers/139/util.go +++ b/drivers/139/util.go @@ -252,7 +252,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) { } files = append(files, &f) } - if 100*pageNum > resp.Data.TotalCount { + if resp.Data.TotalCount == 0 { break } pageNum++ From e4439e66b9a03d139607584c2290ca2a99f7e184 Mon Sep 17 00:00:00 2001 From: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Wed, 1 Jan 2025 21:13:34 +0800 Subject: [PATCH 074/187] fix:(baidu_photo): upload erron -6 (#7760 close #7744) * fix:(baidu_photo): upload erron -6 * fix(baidu_photo):api add bdstoken --- drivers/baidu_photo/driver.go | 13 ++++++++++--- drivers/baidu_photo/utils.go | 15 +++++++++++++++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/drivers/baidu_photo/driver.go b/drivers/baidu_photo/driver.go index d0d69e82..b584c9a3 100644 --- a/drivers/baidu_photo/driver.go +++ b/drivers/baidu_photo/driver.go @@ -28,8 +28,9 @@ type BaiduPhoto struct { Addition // AccessToken string - Uk int64 - root model.Obj + Uk int64 + bdstoken string + root model.Obj uploadThread int } @@ -73,6 +74,10 @@ func (d *BaiduPhoto) Init(ctx context.Context) error { if err != nil { return err } + d.bdstoken, err = d.getBDStoken() + if err != nil { + return err + } d.Uk, err = strconv.ParseInt(info.YouaID, 10, 64) return err } @@ -296,6 +301,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil _, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) { r.SetContext(ctx) r.SetFormData(params) + r.SetQueryParam("bdstoken", d.bdstoken) }, 
&precreateResp) if err != nil { return nil, err @@ -324,8 +330,8 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil "path": params["path"], "partseq": fmt.Sprint(partseq), "uploadid": precreateResp.UploadID, + "app_id": "16051585", } - _, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) { r.SetContext(ctx) r.SetQueryParams(uploadParams) @@ -352,6 +358,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil _, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) { r.SetContext(ctx) r.SetFormData(params) + r.SetQueryParam("bdstoken", d.bdstoken) }, &precreateResp) if err != nil { return nil, err diff --git a/drivers/baidu_photo/utils.go b/drivers/baidu_photo/utils.go index 0b960593..6061600e 100644 --- a/drivers/baidu_photo/utils.go +++ b/drivers/baidu_photo/utils.go @@ -476,6 +476,21 @@ func (d *BaiduPhoto) uInfo() (*UInfo, error) { return &info, nil } +func (d *BaiduPhoto) getBDStoken() (string, error) { + var info struct { + Result struct { + Bdstoken string `json:"bdstoken"` + Token string `json:"token"` + Uk int64 `json:"uk"` + } `json:"result"` + } + _, err := d.Get("https://pan.baidu.com/api/gettemplatevariable?fields=[%22bdstoken%22,%22token%22,%22uk%22]", nil, &info) + if err != nil { + return "", err + } + return info.Result.Bdstoken, nil +} + func DecryptMd5(encryptMd5 string) string { if _, err := hex.DecodeString(encryptMd5); err == nil { return encryptMd5 From 687124c81d8256ea202689fbca019d6fcffb10af Mon Sep 17 00:00:00 2001 From: Mmx <36563672+Mmx233@users.noreply.github.com> Date: Wed, 1 Jan 2025 21:29:59 +0800 Subject: [PATCH 075/187] ci(build_docker): merge build_docker into release_docker workflow (#7755) * feat(ci): merge build_docker workflow into release_docker * fix(ci): logics of docker meta --- .github/workflows/build_docker.yml | 126 --------------------------- .github/workflows/release_docker.yml | 28 +++++- 2 files changed, 24 
insertions(+), 130 deletions(-) delete mode 100644 .github/workflows/build_docker.yml diff --git a/.github/workflows/build_docker.yml b/.github/workflows/build_docker.yml deleted file mode 100644 index 6384c374..00000000 --- a/.github/workflows/build_docker.yml +++ /dev/null @@ -1,126 +0,0 @@ -name: build_docker - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -jobs: - build_docker: - name: Build Docker - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: xhofe/alist - tags: | - type=schedule - type=ref,event=branch - type=ref,event=tag - type=ref,event=pr - type=raw,value=beta,enable={{is_default_branch}} - - - name: Docker meta with ffmpeg - id: meta-ffmpeg - uses: docker/metadata-action@v5 - with: - images: xhofe/alist - flavor: | - suffix=-ffmpeg - tags: | - type=schedule - type=ref,event=branch - type=ref,event=tag - type=ref,event=pr - type=raw,value=beta,enable={{is_default_branch}} - - - uses: actions/setup-go@v5 - with: - go-version: 'stable' - - - name: Cache Musl - id: cache-musl - uses: actions/cache@v4 - with: - path: build/musl-libs - key: docker-musl-libs-v2 - - - name: Download Musl Library - if: steps.cache-musl.outputs.cache-hit != 'true' - run: bash build.sh prepare docker-multiplatform - - - name: Build go binary - run: bash build.sh dev docker-multiplatform - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to DockerHub - if: github.event_name == 'push' - uses: docker/login-action@v3 - with: - username: xhofe - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build and push - id: docker_build - uses: docker/build-push-action@v6 - with: - context: . 
- file: Dockerfile.ci - push: ${{ github.event_name == 'push' }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64 - - - name: Build and push with ffmpeg - id: docker_build_ffmpeg - uses: docker/build-push-action@v6 - with: - context: . - file: Dockerfile.ci - push: ${{ github.event_name == 'push' }} - tags: ${{ steps.meta-ffmpeg.outputs.tags }} - labels: ${{ steps.meta-ffmpeg.outputs.labels }} - build-args: INSTALL_FFMPEG=true - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64 - - build_docker_with_aria2: - needs: build_docker - name: Build docker with aria2 - runs-on: ubuntu-latest - if: github.event_name == 'push' - steps: - - name: Checkout repo - uses: actions/checkout@v4 - with: - repository: alist-org/with_aria2 - ref: main - persist-credentials: false - fetch-depth: 0 - - - name: Commit - run: | - git config --local user.email "bot@nn.ci" - git config --local user.name "IlaBot" - git commit --allow-empty -m "Trigger build for ${{ github.sha }}" - - - name: Push commit - uses: ad-m/github-push-action@master - with: - github_token: ${{ secrets.MY_TOKEN }} - branch: main - repository: alist-org/with_aria2 diff --git a/.github/workflows/release_docker.yml b/.github/workflows/release_docker.yml index 0f559a3f..f4c79baf 100644 --- a/.github/workflows/release_docker.yml +++ b/.github/workflows/release_docker.yml @@ -4,13 +4,30 @@ on: push: tags: - 'v*' + branches: + - main + pull_request: + branches: + - main + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true env: - IMAGE_REGISTRY: 'xhofe/alist' + REGISTRY: 'xhofe/alist' REGISTRY_USERNAME: 'xhofe' REGISTRY_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }} ARTIFACT_NAME: 'binaries_docker_release' RELEASE_PLATFORMS: 
'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64' + IMAGE_PUSH: ${{ github.event_name == 'push' }} + IMAGE_IS_PROD: ${{ github.ref_type == 'tag' }} + IMAGE_TAGS_BETA: | + type=schedule + type=ref,event=branch + type=ref,event=tag + type=ref,event=pr + type=raw,value=beta,enable={{is_default_branch}} jobs: build_binary: @@ -85,8 +102,10 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Login to DockerHub + if: env.IMAGE_PUSH == 'true' uses: docker/login-action@v3 with: + logout: true username: ${{ env.REGISTRY_USERNAME }} password: ${{ env.REGISTRY_PASSWORD }} @@ -94,9 +113,10 @@ jobs: id: meta uses: docker/metadata-action@v5 with: - images: ${{ env.IMAGE_REGISTRY }} + images: ${{ env.REGISTRY }} + tags: ${{ env.IMAGE_IS_PROD == 'true' && '' || env.IMAGE_TAGS_BETA }} flavor: | - latest=true + ${{ env.IMAGE_IS_PROD == 'true' && 'latest=true' || '' }} ${{ matrix.tag_favor }} - name: Build and push @@ -105,7 +125,7 @@ jobs: with: context: . file: Dockerfile.ci - push: true + push: ${{ env.IMAGE_PUSH == 'true' }} build-args: ${{ matrix.build_arg }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} From 31a74708653b2567c45b2db0b25db5f68d41c2ef Mon Sep 17 00:00:00 2001 From: Lin Tianchuan <47070449+1024th@users.noreply.github.com> Date: Fri, 10 Jan 2025 20:48:45 +0800 Subject: [PATCH 076/187] feat(local): support both time and percent for video thumbnail (#7802) * feat(local): support percent for video thumbnail The percentage determines the point in the video (as a percentage of the total duration) at which the thumbnail will be generated. 
* feat(local): support both time and percent for video thumbnail --- drivers/local/driver.go | 22 ++++++++++++++++ drivers/local/meta.go | 1 + drivers/local/util.go | 58 ++++++++++++++++++++++++++++++++++++++--- 3 files changed, 77 insertions(+), 4 deletions(-) diff --git a/drivers/local/driver.go b/drivers/local/driver.go index 2519232e..8a804ef3 100644 --- a/drivers/local/driver.go +++ b/drivers/local/driver.go @@ -79,6 +79,28 @@ func (d *Local) Init(ctx context.Context) error { } else { d.thumbTokenBucket = NewStaticTokenBucketWithMigration(d.thumbTokenBucket, d.thumbConcurrency) } + // Check the VideoThumbPos value + if d.VideoThumbPos == "" { + d.VideoThumbPos = "20%" + } + if strings.HasSuffix(d.VideoThumbPos, "%") { + percentage := strings.TrimSuffix(d.VideoThumbPos, "%") + val, err := strconv.ParseFloat(percentage, 64) + if err != nil { + return fmt.Errorf("invalid video_thumb_pos value: %s, err: %s", d.VideoThumbPos, err) + } + if val < 0 || val > 100 { + return fmt.Errorf("invalid video_thumb_pos value: %s, the precentage must be a number between 0 and 100", d.VideoThumbPos) + } + } else { + val, err := strconv.ParseFloat(d.VideoThumbPos, 64) + if err != nil { + return fmt.Errorf("invalid video_thumb_pos value: %s, err: %s", d.VideoThumbPos, err) + } + if val < 0 { + return fmt.Errorf("invalid video_thumb_pos value: %s, the time must be a positive number", d.VideoThumbPos) + } + } return nil } diff --git a/drivers/local/meta.go b/drivers/local/meta.go index 5ffac920..14b0404f 100644 --- a/drivers/local/meta.go +++ b/drivers/local/meta.go @@ -10,6 +10,7 @@ type Addition struct { Thumbnail bool `json:"thumbnail" required:"true" help:"enable thumbnail"` ThumbCacheFolder string `json:"thumb_cache_folder"` ThumbConcurrency string `json:"thumb_concurrency" default:"16" required:"false" help:"Number of concurrent thumbnail generation goroutines. 
This controls how many thumbnails can be generated in parallel."` + VideoThumbPos string `json:"video_thumb_pos" default:"20%" required:"false" help:"The position of the video thumbnail. If the value is a number (integer ot floating point), it represents the time in seconds. If the value ends with '%', it represents the percentage of the video duration."` ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"` MkdirPerm string `json:"mkdir_perm" default:"777"` RecycleBinPath string `json:"recycle_bin_path" default:"delete permanently" help:"path to recycle bin, delete permanently if empty or keep 'delete permanently'"` diff --git a/drivers/local/util.go b/drivers/local/util.go index b994c205..d2fbd097 100644 --- a/drivers/local/util.go +++ b/drivers/local/util.go @@ -2,11 +2,13 @@ package local import ( "bytes" + "encoding/json" "fmt" "io/fs" "os" "path/filepath" "sort" + "strconv" "strings" "github.com/alist-org/alist/v3/internal/conf" @@ -34,10 +36,58 @@ func isSymlinkDir(f fs.FileInfo, path string) bool { return false } -func GetSnapshot(videoPath string, frameNum int) (imgData *bytes.Buffer, err error) { +// Get the snapshot of the video +func (d *Local) GetSnapshot(videoPath string) (imgData *bytes.Buffer, err error) { + // Run ffprobe to get the video duration + jsonOutput, err := ffmpeg.Probe(videoPath) + if err != nil { + return nil, err + } + // get format.duration from the json string + type probeFormat struct { + Duration string `json:"duration"` + } + type probeData struct { + Format probeFormat `json:"format"` + } + var probe probeData + err = json.Unmarshal([]byte(jsonOutput), &probe) + if err != nil { + return nil, err + } + totalDuration, err := strconv.ParseFloat(probe.Format.Duration, 64) + if err != nil { + return nil, err + } + + var ss string + if strings.HasSuffix(d.VideoThumbPos, "%") { + percentage, err := strconv.ParseFloat(strings.TrimSuffix(d.VideoThumbPos, "%"), 64) + if err != nil { 
+ return nil, err + } + ss = fmt.Sprintf("%f", totalDuration*percentage/100) + } else { + val, err := strconv.ParseFloat(d.VideoThumbPos, 64) + if err != nil { + return nil, err + } + // If the value is greater than the total duration, use the total duration + if val > totalDuration { + ss = fmt.Sprintf("%f", totalDuration) + } else { + ss = d.VideoThumbPos + } + } + + // Run ffmpeg to get the snapshot srcBuf := bytes.NewBuffer(nil) - stream := ffmpeg.Input(videoPath). - Filter("select", ffmpeg.Args{fmt.Sprintf("gte(n,%d)", frameNum)}). + // If the remaining time from the seek point to the end of the video is less + // than the duration of a single frame, ffmpeg cannot extract any frames + // within the specified range and will exit with an error. + // The "noaccurate_seek" option prevents this error and would also speed up + // the seek process. + stream := ffmpeg.Input(videoPath, ffmpeg.KwArgs{"ss": ss, "noaccurate_seek": ""}). Output("pipe:", ffmpeg.KwArgs{"vframes": 1, "format": "image2", "vcodec": "mjpeg"}). GlobalArgs("-loglevel", "error").Silent(true). 
WithOutput(srcBuf, os.Stdout) @@ -77,7 +127,7 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) { } var srcBuf *bytes.Buffer if utils.GetFileType(file.GetName()) == conf.VIDEO { - videoBuf, err := GetSnapshot(fullPath, 10) + videoBuf, err := d.GetSnapshot(fullPath) if err != nil { return nil, nil, err } From 6812ec9a6d7c4a1684f23d23d7d2a06a48bc635d Mon Sep 17 00:00:00 2001 From: Jiang Xiang <869914918@qq.com> Date: Fri, 10 Jan 2025 20:49:50 +0800 Subject: [PATCH 077/187] fix(ilanzou): add accept-encoding request header (#7796 close #7759) --- drivers/ilanzou/util.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/ilanzou/util.go b/drivers/ilanzou/util.go index a57e2a4a..b8fd5280 100644 --- a/drivers/ilanzou/util.go +++ b/drivers/ilanzou/util.go @@ -69,9 +69,10 @@ func (d *ILanZou) request(pathname, method string, callback base.ReqCallback, pr req := base.RestyClient.R() req.SetHeaders(map[string]string{ - "Origin": d.conf.site, - "Referer": d.conf.site + "/", - "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0", + "Origin": d.conf.site, + "Referer": d.conf.site + "/", + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0", + "Accept-Encoding": "gzip, deflate, br, zstd", }) if callback != nil { From 25b4b55ee108576217259bd850781f5a5f4fc3ce Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Fri, 10 Jan 2025 20:50:20 +0800 Subject: [PATCH 078/187] feat(ftp-server): support resumable downloading (#7792) --- go.mod | 4 ++-- go.sum | 8 ++++---- server/ftp/afero.go | 8 ++++---- server/ftp/fsread.go | 47 ++++++++++++++++++++++++++++++++++++++------ server/ftp/fsup.go | 2 +- 5 files changed, 52 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index 1deaa1d5..7ca66e15 100644 --- a/go.mod +++ b/go.mod 
@@ -4,7 +4,7 @@ go 1.22.4 require ( github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 - github.com/KirCute/sftpd-alist v0.0.11 + github.com/KirCute/sftpd-alist v0.0.12 github.com/SheltonZhu/115driver v1.0.32 github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4 @@ -62,7 +62,7 @@ require ( github.com/xhofe/tache v0.1.3 github.com/xhofe/wopan-sdk-go v0.1.3 github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22 - golang.org/x/crypto v0.30.0 + golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e golang.org/x/image v0.19.0 golang.org/x/net v0.28.0 diff --git a/go.sum b/go.sum index a4e8e12d..101a0bea 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 h1:ikwCzeqoqN6wvBHOB9OI6dde/jbV7EoTMpUcxtYl5Po= github.com/KirCute/ftpserverlib-pasvportmap v1.25.0/go.mod h1:v0NgMtKDDi/6CM6r4P+daCljCW3eO9yS+Z+pZDTKo1E= -github.com/KirCute/sftpd-alist v0.0.11 h1:BGInXmmLBI+v6S9WZCwvY0DRK1vDprGNcTv/57p2GSo= -github.com/KirCute/sftpd-alist v0.0.11/go.mod h1:pPFzr6GrKqXvFXLr46ZpoqmtSpwH8DKTYloSp/ybzKQ= +github.com/KirCute/sftpd-alist v0.0.12 h1:GNVM5QLbQLAfXP4wGUlXFA2IO6fVek0n0IsGnOuISdg= +github.com/KirCute/sftpd-alist v0.0.12/go.mod h1:2wNK7yyW2XfjyJq10OY6xB4COLac64hOwfV6clDJn6s= github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE= github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc= github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM= @@ -574,8 +574,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod 
h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= -golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= diff --git a/server/ftp/afero.go b/server/ftp/afero.go index 448744b1..75ae2e43 100644 --- a/server/ftp/afero.go +++ b/server/ftp/afero.go @@ -83,9 +83,6 @@ func (a *AferoAdapter) ReadDir(name string) ([]os.FileInfo, error) { func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserver.FileTransfer, error) { fileSize := a.nextFileSize a.nextFileSize = 0 - if offset != 0 { - return nil, errs.NotSupport - } if (flags & os.O_SYNC) != 0 { return nil, errs.NotSupport } @@ -106,6 +103,9 @@ func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserve return nil, errors.New("file already exists") } if (flags & os.O_WRONLY) != 0 { + if offset != 0 { + return nil, errs.NotSupport + } trunc := (flags & os.O_TRUNC) != 0 if fileSize > 0 { return OpenUploadWithLength(a.ctx, path, trunc, fileSize) @@ -113,7 +113,7 @@ func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserve return OpenUpload(a.ctx, path, trunc) } } - return OpenDownload(a.ctx, path) + return OpenDownload(a.ctx, path, offset) } func (a *AferoAdapter) SetNextFileSize(size int64) { diff --git a/server/ftp/fsread.go 
b/server/ftp/fsread.go index 74d184b6..257d2ec8 100644 --- a/server/ftp/fsread.go +++ b/server/ftp/fsread.go @@ -8,6 +8,7 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/server/common" "github.com/pkg/errors" "io" @@ -19,10 +20,12 @@ import ( type FileDownloadProxy struct { ftpserver.FileTransfer - reader io.ReadCloser + ss *stream.SeekableStream + reader io.Reader + cur int64 } -func OpenDownload(ctx context.Context, reqPath string) (*FileDownloadProxy, error) { +func OpenDownload(ctx context.Context, reqPath string, offset int64) (*FileDownloadProxy, error) { user := ctx.Value("user").(*model.User) meta, err := op.GetNearestMeta(reqPath) if err != nil { @@ -52,11 +55,22 @@ func OpenDownload(ctx context.Context, reqPath string) (*FileDownloadProxy, erro if err != nil { return nil, err } - return &FileDownloadProxy{reader: ss}, nil + var reader io.Reader + if offset != 0 { + reader, err = ss.RangeRead(http_range.Range{Start: offset, Length: -1}) + if err != nil { + return nil, err + } + } else { + reader = ss + } + return &FileDownloadProxy{ss: ss, reader: reader}, nil } func (f *FileDownloadProxy) Read(p []byte) (n int, err error) { - return f.reader.Read(p) + n, err = f.reader.Read(p) + f.cur += int64(n) + return n, err } func (f *FileDownloadProxy) Write(p []byte) (n int, err error) { @@ -64,11 +78,32 @@ func (f *FileDownloadProxy) Write(p []byte) (n int, err error) { } func (f *FileDownloadProxy) Seek(offset int64, whence int) (int64, error) { - return 0, errs.NotSupport + switch whence { + case io.SeekStart: + break + case io.SeekCurrent: + offset += f.cur + break + case io.SeekEnd: + offset += f.ss.GetSize() + break + default: + return 0, errs.NotSupport + } + if offset < 0 { + return 0, errors.New("Seek: negative position") + } + reader, err := 
f.ss.RangeRead(http_range.Range{Start: offset, Length: -1}) + if err != nil { + return f.cur, err + } + f.cur = offset + f.reader = reader + return offset, nil } func (f *FileDownloadProxy) Close() error { - return f.reader.Close() + return f.ss.Close() } type OsFileInfoAdapter struct { diff --git a/server/ftp/fsup.go b/server/ftp/fsup.go index 96c84681..4d626d0e 100644 --- a/server/ftp/fsup.go +++ b/server/ftp/fsup.go @@ -63,7 +63,7 @@ func (f *FileUploadProxy) Write(p []byte) (n int, err error) { } func (f *FileUploadProxy) Seek(offset int64, whence int) (int64, error) { - return 0, errs.NotSupport + return f.buffer.Seek(offset, whence) } func (f *FileUploadProxy) Close() error { From 51bcf83511be2bc05ff73d2450784d1a14d9d973 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Fri, 10 Jan 2025 20:50:56 +0800 Subject: [PATCH 079/187] feat(url-tree): support url tree driver writing (#7779 close #5166) * feat: support url tree writing * fix: meta writable * feat: disable writable via addition --- drivers/url_tree/driver.go | 181 +++++++++++++++++++++++++- drivers/url_tree/meta.go | 3 +- drivers/url_tree/types.go | 18 +++ drivers/url_tree/util.go | 46 +++++++ internal/driver/driver.go | 14 ++ internal/offline_download/tool/add.go | 47 +++++-- internal/op/fs.go | 40 ++++++ server/handles/offline_download.go | 4 +- 8 files changed, 338 insertions(+), 15 deletions(-) diff --git a/drivers/url_tree/driver.go b/drivers/url_tree/driver.go index 6a45bb7d..569b3fba 100644 --- a/drivers/url_tree/driver.go +++ b/drivers/url_tree/driver.go @@ -2,7 +2,11 @@ package url_tree import ( "context" + "errors" + "github.com/alist-org/alist/v3/internal/op" stdpath "path" + "strings" + "sync" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -14,7 +18,8 @@ import ( type Urls struct { model.Storage Addition - root *Node + root *Node + mutex sync.RWMutex } func (d *Urls) Config() driver.Config { @@ -40,11 +45,15 @@ func (d *Urls) 
Drop(ctx context.Context) error { } func (d *Urls) Get(ctx context.Context, path string) (model.Obj, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() node := GetNodeFromRootByPath(d.root, path) return nodeToObj(node, path) } func (d *Urls) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() node := GetNodeFromRootByPath(d.root, dir.GetPath()) log.Debugf("path: %s, node: %+v", dir.GetPath(), node) if node == nil { @@ -59,6 +68,8 @@ func (d *Urls) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([] } func (d *Urls) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() node := GetNodeFromRootByPath(d.root, file.GetPath()) log.Debugf("path: %s, node: %+v", file.GetPath(), node) if node == nil { @@ -72,6 +83,174 @@ func (d *Urls) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (* return nil, errs.NotFile } +func (d *Urls) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + if !d.Writable { + return nil, errs.PermissionDenied + } + d.mutex.Lock() + defer d.mutex.Unlock() + node := GetNodeFromRootByPath(d.root, parentDir.GetPath()) + if node == nil { + return nil, errs.ObjectNotFound + } + if node.isFile() { + return nil, errs.NotFolder + } + dir := &Node{ + Name: dirName, + Level: node.Level + 1, + } + node.Children = append(node.Children, dir) + d.updateStorage() + return nodeToObj(dir, stdpath.Join(parentDir.GetPath(), dirName)) +} + +func (d *Urls) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + if !d.Writable { + return nil, errs.PermissionDenied + } + if strings.HasPrefix(dstDir.GetPath(), srcObj.GetPath()) { + return nil, errors.New("cannot move parent dir to child") + } + d.mutex.Lock() + defer d.mutex.Unlock() + dstNode := GetNodeFromRootByPath(d.root, dstDir.GetPath()) + if dstNode == nil || dstNode.isFile() { + 
return nil, errs.NotFolder + } + srcDir, srcName := stdpath.Split(srcObj.GetPath()) + srcParentNode := GetNodeFromRootByPath(d.root, srcDir) + if srcParentNode == nil { + return nil, errs.ObjectNotFound + } + newChildren := make([]*Node, 0, len(srcParentNode.Children)) + var srcNode *Node + for _, child := range srcParentNode.Children { + if child.Name == srcName { + srcNode = child + } else { + newChildren = append(newChildren, child) + } + } + if srcNode == nil { + return nil, errs.ObjectNotFound + } + srcParentNode.Children = newChildren + srcNode.setLevel(dstNode.Level + 1) + dstNode.Children = append(dstNode.Children, srcNode) + d.root.calSize() + d.updateStorage() + return nodeToObj(srcNode, stdpath.Join(dstDir.GetPath(), srcName)) +} + +func (d *Urls) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + if !d.Writable { + return nil, errs.PermissionDenied + } + d.mutex.Lock() + defer d.mutex.Unlock() + srcNode := GetNodeFromRootByPath(d.root, srcObj.GetPath()) + if srcNode == nil { + return nil, errs.ObjectNotFound + } + srcNode.Name = newName + d.updateStorage() + return nodeToObj(srcNode, stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName)) +} + +func (d *Urls) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + if !d.Writable { + return nil, errs.PermissionDenied + } + if strings.HasPrefix(dstDir.GetPath(), srcObj.GetPath()) { + return nil, errors.New("cannot copy parent dir to child") + } + d.mutex.Lock() + defer d.mutex.Unlock() + dstNode := GetNodeFromRootByPath(d.root, dstDir.GetPath()) + if dstNode == nil || dstNode.isFile() { + return nil, errs.NotFolder + } + srcNode := GetNodeFromRootByPath(d.root, srcObj.GetPath()) + if srcNode == nil { + return nil, errs.ObjectNotFound + } + newNode := srcNode.deepCopy(dstNode.Level + 1) + dstNode.Children = append(dstNode.Children, newNode) + d.root.calSize() + d.updateStorage() + return nodeToObj(newNode, stdpath.Join(dstDir.GetPath(), 
stdpath.Base(srcObj.GetPath()))) +} + +func (d *Urls) Remove(ctx context.Context, obj model.Obj) error { + if !d.Writable { + return errs.PermissionDenied + } + d.mutex.Lock() + defer d.mutex.Unlock() + objDir, objName := stdpath.Split(obj.GetPath()) + nodeParent := GetNodeFromRootByPath(d.root, objDir) + if nodeParent == nil { + return errs.ObjectNotFound + } + newChildren := make([]*Node, 0, len(nodeParent.Children)) + var deletedObj *Node + for _, child := range nodeParent.Children { + if child.Name != objName { + newChildren = append(newChildren, child) + } else { + deletedObj = child + } + } + if deletedObj == nil { + return errs.ObjectNotFound + } + nodeParent.Children = newChildren + if deletedObj.Size > 0 { + d.root.calSize() + } + d.updateStorage() + return nil +} + +func (d *Urls) PutURL(ctx context.Context, dstDir model.Obj, name, url string) (model.Obj, error) { + if !d.Writable { + return nil, errs.PermissionDenied + } + d.mutex.Lock() + defer d.mutex.Unlock() + dirNode := GetNodeFromRootByPath(d.root, dstDir.GetPath()) + if dirNode == nil || dirNode.isFile() { + return nil, errs.NotFolder + } + newNode := &Node{ + Name: name, + Level: dirNode.Level + 1, + Url: url, + } + dirNode.Children = append(dirNode.Children, newNode) + if d.HeadSize { + size, err := getSizeFromUrl(url) + if err != nil { + log.Errorf("get size from url error: %s", err) + } else { + newNode.Size = size + d.root.calSize() + } + } + d.updateStorage() + return nodeToObj(newNode, stdpath.Join(dstDir.GetPath(), name)) +} + +func (d *Urls) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { + return errs.UploadNotSupported +} + +func (d *Urls) updateStorage() { + d.UrlStructure = StringifyTree(d.root) + op.MustSaveDriverStorage(d) +} + //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { // return nil, errs.NotSupport //} diff --git a/drivers/url_tree/meta.go b/drivers/url_tree/meta.go index 
b3ae33dc..c40414f5 100644 --- a/drivers/url_tree/meta.go +++ b/drivers/url_tree/meta.go @@ -12,6 +12,7 @@ type Addition struct { // define other UrlStructure string `json:"url_structure" type:"text" required:"true" default:"https://jsd.nn.ci/gh/alist-org/alist/README.md\nhttps://jsd.nn.ci/gh/alist-org/alist/README_cn.md\nfolder:\n CONTRIBUTING.md:1635:https://jsd.nn.ci/gh/alist-org/alist/CONTRIBUTING.md\n CODE_OF_CONDUCT.md:2093:https://jsd.nn.ci/gh/alist-org/alist/CODE_OF_CONDUCT.md" help:"structure:FolderName:\n [FileName:][FileSize:][Modified:]Url"` HeadSize bool `json:"head_size" type:"bool" default:"false" help:"Use head method to get file size, but it may be failed."` + Writable bool `json:"writable" type:"bool" default:"false"` } var config = driver.Config{ @@ -20,7 +21,7 @@ var config = driver.Config{ OnlyLocal: false, OnlyProxy: false, NoCache: true, - NoUpload: true, + NoUpload: false, NeedMs: false, DefaultRoot: "", CheckStatus: true, diff --git a/drivers/url_tree/types.go b/drivers/url_tree/types.go index 7e8ca3d9..cf62d29d 100644 --- a/drivers/url_tree/types.go +++ b/drivers/url_tree/types.go @@ -1,5 +1,7 @@ package url_tree +import "github.com/alist-org/alist/v3/pkg/utils" + // Node is a node in the folder tree type Node struct { Url string @@ -44,3 +46,19 @@ func (node *Node) calSize() int64 { node.Size = size return size } + +func (node *Node) setLevel(level int) { + node.Level = level + for _, child := range node.Children { + child.setLevel(level + 1) + } +} + +func (node *Node) deepCopy(level int) *Node { + ret := *node + ret.Level = level + ret.Children, _ = utils.SliceConvert(ret.Children, func(child *Node) (*Node, error) { + return child.deepCopy(level + 1), nil + }) + return &ret +} diff --git a/drivers/url_tree/util.go b/drivers/url_tree/util.go index 4065218f..61a3fde2 100644 --- a/drivers/url_tree/util.go +++ b/drivers/url_tree/util.go @@ -153,6 +153,9 @@ func splitPath(path string) []string { if path == "/" { return []string{"root"} } + if 
strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } parts := strings.Split(path, "/") parts[0] = "root" return parts @@ -190,3 +193,46 @@ func getSizeFromUrl(url string) (int64, error) { } return size, nil } + +func StringifyTree(node *Node) string { + sb := strings.Builder{} + if node.Level == -1 { + for i, child := range node.Children { + sb.WriteString(StringifyTree(child)) + if i < len(node.Children)-1 { + sb.WriteString("\n") + } + } + return sb.String() + } + for i := 0; i < node.Level; i++ { + sb.WriteString(" ") + } + if node.Url == "" { + sb.WriteString(node.Name) + sb.WriteString(":") + for _, child := range node.Children { + sb.WriteString("\n") + sb.WriteString(StringifyTree(child)) + } + } else if node.Size == 0 && node.Modified == 0 { + if stdpath.Base(node.Url) == node.Name { + sb.WriteString(node.Url) + } else { + sb.WriteString(fmt.Sprintf("%s:%s", node.Name, node.Url)) + } + } else { + sb.WriteString(node.Name) + sb.WriteString(":") + if node.Size != 0 || node.Modified != 0 { + sb.WriteString(strconv.FormatInt(node.Size, 10)) + sb.WriteString(":") + } + if node.Modified != 0 { + sb.WriteString(strconv.FormatInt(node.Modified, 10)) + sb.WriteString(":") + } + sb.WriteString(node.Url) + } + return sb.String() +} diff --git a/internal/driver/driver.go b/internal/driver/driver.go index 781e8532..6fd5e8d6 100644 --- a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -80,6 +80,13 @@ type Put interface { Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) error } +type PutURL interface { + // PutURL directly put a URL into the storage + // Applicable to index-based drivers like URL-Tree or drivers that support uploading files as URLs + // Called when using SimpleHttp for offline downloading, skipping creating a download task + PutURL(ctx context.Context, dstDir model.Obj, name, url string) error +} + //type WriteResult interface { // MkdirResult // MoveResult @@ -109,6 +116,13 @@ type PutResult 
interface { Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) (model.Obj, error) } +type PutURLResult interface { + // PutURL directly put a URL into the storage + // Applicable to index-based drivers like URL-Tree or drivers that support uploading files as URLs + // Called when using SimpleHttp for offline downloading, skipping creating a download task + PutURL(ctx context.Context, dstDir model.Obj, name, url string) (model.Obj, error) +} + type UpdateProgress func(percentage float64) type Progress struct { diff --git a/internal/offline_download/tool/add.go b/internal/offline_download/tool/add.go index 4158051a..405f96cb 100644 --- a/internal/offline_download/tool/add.go +++ b/internal/offline_download/tool/add.go @@ -2,8 +2,11 @@ package tool import ( "context" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/task" + "net/url" + "path" "path/filepath" "github.com/alist-org/alist/v3/internal/conf" @@ -30,18 +33,6 @@ type AddURLArgs struct { } func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, error) { - // get tool - tool, err := Tools.Get(args.Tool) - if err != nil { - return nil, errors.Wrapf(err, "failed get tool") - } - // check tool is ready - if !tool.IsReady() { - // try to init tool - if _, err := tool.Init(); err != nil { - return nil, errors.Wrapf(err, "failed init tool %s", args.Tool) - } - } // check storage storage, dstDirActualPath, err := op.GetStorageAndActualPath(args.DstDirPath) if err != nil { @@ -63,6 +54,23 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro return nil, errors.WithStack(errs.NotFolder) } } + // try putting url + if args.Tool == "SimpleHttp" && tryPutUrl(ctx, storage, dstDirActualPath, args.URL) { + return nil, nil + } + + // get tool + tool, err := Tools.Get(args.Tool) + if err != nil { + return nil, errors.Wrapf(err, "failed get tool") + } + // 
check tool is ready + if !tool.IsReady() { + // try to init tool + if _, err := tool.Init(); err != nil { + return nil, errors.Wrapf(err, "failed init tool %s", args.Tool) + } + } uid := uuid.NewString() tempDir := filepath.Join(conf.Conf.TempDir, args.Tool, uid) @@ -98,3 +106,18 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro DownloadTaskManager.Add(t) return t, nil } + +func tryPutUrl(ctx context.Context, storage driver.Driver, dstDirActualPath, urlStr string) bool { + _, ok := storage.(driver.PutURL) + _, okResult := storage.(driver.PutURLResult) + if !ok && !okResult { + return false + } + u, err := url.Parse(urlStr) + if err != nil { + return false + } + dstName := path.Base(u.Path) + err = op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr) + return err == nil +} diff --git a/internal/op/fs.go b/internal/op/fs.go index e49c941a..01727e75 100644 --- a/internal/op/fs.go +++ b/internal/op/fs.go @@ -586,3 +586,43 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod } return errors.WithStack(err) } + +func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url string, lazyCache ...bool) error { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + dstDirPath = utils.FixAndCleanPath(dstDirPath) + _, err := GetUnwrap(ctx, storage, stdpath.Join(dstDirPath, dstName)) + if err == nil { + return errors.New("obj already exists") + } + err = MakeDir(ctx, storage, dstDirPath) + if err != nil { + return errors.WithMessagef(err, "failed to put url") + } + dstDir, err := GetUnwrap(ctx, storage, dstDirPath) + if err != nil { + return errors.WithMessagef(err, "failed to put url") + } + switch s := storage.(type) { + case driver.PutURLResult: + var newObj model.Obj + newObj, err = s.PutURL(ctx, dstDir, dstName, url) + if err == nil { + if newObj != nil { + addCacheObj(storage, 
dstDirPath, model.WrapObjName(newObj)) + } else if !utils.IsBool(lazyCache...) { + ClearCache(storage, dstDirPath) + } + } + case driver.PutURL: + err = s.PutURL(ctx, dstDir, dstName, url) + if err == nil && !utils.IsBool(lazyCache...) { + ClearCache(storage, dstDirPath) + } + default: + return errs.NotImplement + } + log.Debugf("put url [%s](%s) done", dstName, url) + return errors.WithStack(err) +} diff --git a/server/handles/offline_download.go b/server/handles/offline_download.go index 9e26030a..c7b7af76 100644 --- a/server/handles/offline_download.go +++ b/server/handles/offline_download.go @@ -145,7 +145,9 @@ func AddOfflineDownload(c *gin.Context) { common.ErrorResp(c, err, 500) return } - tasks = append(tasks, t) + if t != nil { + tasks = append(tasks, t) + } } common.SuccessResp(c, gin.H{ "tasks": getTaskInfos(tasks), From e04114d10246734c3ce0a5aa1719e61cee75dc4c Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Fri, 10 Jan 2025 20:59:58 +0800 Subject: [PATCH 080/187] feat(github): add github api driver (#7717) * feat(github): add github api driver * fix: filter submodule operation * feat: rename, copy and move, but with bugs * fix: move and copy returns 422 * fix: change TargetPath in rename msg from parent path to new self path * fix: add non-commit mutex * pref(github): use net/http to put blob * chore: add a help message to `ref` addition --- drivers/all.go | 1 + drivers/github/driver.go | 928 +++++++++++++++++++++++++++++++++++++++ drivers/github/meta.go | 36 ++ drivers/github/types.go | 102 +++++ drivers/github/util.go | 115 +++++ 5 files changed, 1182 insertions(+) create mode 100644 drivers/github/driver.go create mode 100644 drivers/github/meta.go create mode 100644 drivers/github/types.go create mode 100644 drivers/github/util.go diff --git a/drivers/all.go b/drivers/all.go index 4c4ef5c1..8b253a08 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -24,6 +24,7 @@ import ( _ "github.com/alist-org/alist/v3/drivers/dropbox" _ 
"github.com/alist-org/alist/v3/drivers/febbox" _ "github.com/alist-org/alist/v3/drivers/ftp" + _ "github.com/alist-org/alist/v3/drivers/github" _ "github.com/alist-org/alist/v3/drivers/google_drive" _ "github.com/alist-org/alist/v3/drivers/google_photo" _ "github.com/alist-org/alist/v3/drivers/halalcloud" diff --git a/drivers/github/driver.go b/drivers/github/driver.go new file mode 100644 index 00000000..ea8f6276 --- /dev/null +++ b/drivers/github/driver.go @@ -0,0 +1,928 @@ +package github + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/go-resty/resty/v2" + log "github.com/sirupsen/logrus" + "io" + "net/http" + stdpath "path" + "strings" + "sync" + "text/template" +) + +type Github struct { + model.Storage + Addition + client *resty.Client + mkdirMsgTmpl *template.Template + deleteMsgTmpl *template.Template + putMsgTmpl *template.Template + renameMsgTmpl *template.Template + copyMsgTmpl *template.Template + moveMsgTmpl *template.Template + isOnBranch bool + commitMutex sync.Mutex +} + +func (d *Github) Config() driver.Config { + return config +} + +func (d *Github) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *Github) Init(ctx context.Context) error { + d.RootFolderPath = utils.FixAndCleanPath(d.RootFolderPath) + if d.CommitterName != "" && d.CommitterEmail == "" { + return errors.New("committer email is required") + } + if d.CommitterName == "" && d.CommitterEmail != "" { + return errors.New("committer name is required") + } + if d.AuthorName != "" && d.AuthorEmail == "" { + return errors.New("author email is required") + } + if d.AuthorName == "" && d.AuthorEmail != "" { + return errors.New("author name is required") + } + var err error + d.mkdirMsgTmpl, err = 
template.New("mkdirCommitMsgTemplate").Parse(d.MkdirCommitMsg) + if err != nil { + return err + } + d.deleteMsgTmpl, err = template.New("deleteCommitMsgTemplate").Parse(d.DeleteCommitMsg) + if err != nil { + return err + } + d.putMsgTmpl, err = template.New("putCommitMsgTemplate").Parse(d.PutCommitMsg) + if err != nil { + return err + } + d.renameMsgTmpl, err = template.New("renameCommitMsgTemplate").Parse(d.RenameCommitMsg) + if err != nil { + return err + } + d.copyMsgTmpl, err = template.New("copyCommitMsgTemplate").Parse(d.CopyCommitMsg) + if err != nil { + return err + } + d.moveMsgTmpl, err = template.New("moveCommitMsgTemplate").Parse(d.MoveCommitMsg) + if err != nil { + return err + } + d.client = base.NewRestyClient(). + SetHeader("Accept", "application/vnd.github.object+json"). + SetHeader("Authorization", "Bearer "+d.Token). + SetHeader("X-GitHub-Api-Version", "2022-11-28"). + SetLogger(log.StandardLogger()). + SetDebug(false) + if d.Ref == "" { + repo, err := d.getRepo() + if err != nil { + return err + } + d.Ref = repo.DefaultBranch + d.isOnBranch = true + } else { + _, err = d.getBranchHead() + d.isOnBranch = err == nil + } + return nil +} + +func (d *Github) Drop(ctx context.Context) error { + return nil +} + +func (d *Github) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + obj, err := d.get(dir.GetPath()) + if err != nil { + return nil, err + } + if obj.Entries == nil { + return nil, errs.NotFolder + } + if len(obj.Entries) >= 1000 { + tree, err := d.getTree(obj.Sha) + if err != nil { + return nil, err + } + if tree.Truncated { + return nil, fmt.Errorf("tree %s is truncated", dir.GetPath()) + } + ret := make([]model.Obj, 0, len(tree.Trees)) + for _, t := range tree.Trees { + if t.Path != ".gitkeep" { + ret = append(ret, t.toModelObj()) + } + } + return ret, nil + } else { + ret := make([]model.Obj, 0, len(obj.Entries)) + for _, entry := range obj.Entries { + if entry.Name != ".gitkeep" { + ret = append(ret, 
entry.toModelObj()) + } + } + return ret, nil + } +} + +func (d *Github) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + obj, err := d.get(file.GetPath()) + if err != nil { + return nil, err + } + if obj.Type == "submodule" { + return nil, errors.New("cannot download a submodule") + } + return &model.Link{ + URL: obj.DownloadURL, + }, nil +} + +func (d *Github) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + if !d.isOnBranch { + return errors.New("cannot write to non-branch reference") + } + d.commitMutex.Lock() + defer d.commitMutex.Unlock() + parent, err := d.get(parentDir.GetPath()) + if err != nil { + return err + } + if parent.Entries == nil { + return errs.NotFolder + } + // if parent folder contains .gitkeep only, mark it and delete .gitkeep later + gitKeepSha := "" + if len(parent.Entries) == 1 && parent.Entries[0].Name == ".gitkeep" { + gitKeepSha = parent.Entries[0].Sha + } + + commitMessage, err := getMessage(d.mkdirMsgTmpl, &MessageTemplateVars{ + UserName: getUsername(ctx), + ObjName: dirName, + ObjPath: stdpath.Join(parentDir.GetPath(), dirName), + ParentName: parentDir.GetName(), + ParentPath: parentDir.GetPath(), + }, "mkdir") + if err != nil { + return err + } + if err = d.createGitKeep(stdpath.Join(parentDir.GetPath(), dirName), commitMessage); err != nil { + return err + } + if gitKeepSha != "" { + err = d.delete(stdpath.Join(parentDir.GetPath(), ".gitkeep"), gitKeepSha, commitMessage) + } + return err +} + +func (d *Github) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + if !d.isOnBranch { + return errors.New("cannot write to non-branch reference") + } + if strings.HasPrefix(dstDir.GetPath(), srcObj.GetPath()) { + return errors.New("cannot move parent dir to child") + } + d.commitMutex.Lock() + defer d.commitMutex.Unlock() + + var rootSha string + if strings.HasPrefix(dstDir.GetPath(), stdpath.Dir(srcObj.GetPath())) { // /aa/1 -> /aa/bb/ + dstOldSha, dstNewSha, 
ancestorOldSha, srcParentTree, err := d.copyWithoutRenewTree(srcObj, dstDir) + if err != nil { + return err + } + + srcParentPath := stdpath.Dir(srcObj.GetPath()) + dstRest := dstDir.GetPath()[len(srcParentPath):] + if dstRest[0] == '/' { + dstRest = dstRest[1:] + } + dstNextName, _, _ := strings.Cut(dstRest, "/") + dstNextPath := stdpath.Join(srcParentPath, dstNextName) + dstNextTreeSha, err := d.renewParentTrees(dstDir.GetPath(), dstOldSha, dstNewSha, dstNextPath) + if err != nil { + return err + } + var delSrc, dstNextTree *TreeObjReq = nil, nil + for _, t := range srcParentTree.Trees { + if t.Path == dstNextName { + dstNextTree = &t.TreeObjReq + dstNextTree.Sha = dstNextTreeSha + } + if t.Path == srcObj.GetName() { + delSrc = &t.TreeObjReq + delSrc.Sha = nil + } + if delSrc != nil && dstNextTree != nil { + break + } + } + if delSrc == nil || dstNextTree == nil { + return errs.ObjectNotFound + } + ancestorNewSha, err := d.newTree(ancestorOldSha, []interface{}{*delSrc, *dstNextTree}) + if err != nil { + return err + } + rootSha, err = d.renewParentTrees(srcParentPath, ancestorOldSha, ancestorNewSha, "/") + if err != nil { + return err + } + } else if strings.HasPrefix(srcObj.GetPath(), dstDir.GetPath()) { // /aa/bb/1 -> /aa/ + srcParentPath := stdpath.Dir(srcObj.GetPath()) + srcParentTree, srcParentOldSha, err := d.getTreeDirectly(srcParentPath) + if err != nil { + return err + } + var src *TreeObjReq = nil + for _, t := range srcParentTree.Trees { + if t.Path == srcObj.GetName() { + if t.Type == "commit" { + return errors.New("cannot move a submodule") + } + src = &t.TreeObjReq + break + } + } + if src == nil { + return errs.ObjectNotFound + } + + delSrc := *src + delSrc.Sha = nil + delSrcTree := make([]interface{}, 0, 2) + delSrcTree = append(delSrcTree, delSrc) + if len(srcParentTree.Trees) == 1 { + delSrcTree = append(delSrcTree, map[string]string{ + "path": ".gitkeep", + "mode": "100644", + "type": "blob", + "content": "", + }) + } + srcParentNewSha, err := 
d.newTree(srcParentOldSha, delSrcTree) + if err != nil { + return err + } + srcRest := srcObj.GetPath()[len(dstDir.GetPath()):] + if srcRest[0] == '/' { + srcRest = srcRest[1:] + } + srcNextName, _, ok := strings.Cut(srcRest, "/") + if !ok { // /aa/1 -> /aa/ + return errors.New("cannot move in place") + } + srcNextPath := stdpath.Join(dstDir.GetPath(), srcNextName) + srcNextTreeSha, err := d.renewParentTrees(srcParentPath, srcParentOldSha, srcParentNewSha, srcNextPath) + if err != nil { + return err + } + + ancestorTree, ancestorOldSha, err := d.getTreeDirectly(dstDir.GetPath()) + if err != nil { + return err + } + var srcNextTree *TreeObjReq = nil + for _, t := range ancestorTree.Trees { + if t.Path == srcNextName { + srcNextTree = &t.TreeObjReq + srcNextTree.Sha = srcNextTreeSha + break + } + } + if srcNextTree == nil { + return errs.ObjectNotFound + } + ancestorNewSha, err := d.newTree(ancestorOldSha, []interface{}{*srcNextTree, *src}) + if err != nil { + return err + } + rootSha, err = d.renewParentTrees(dstDir.GetPath(), ancestorOldSha, ancestorNewSha, "/") + if err != nil { + return err + } + } else { // /aa/1 -> /bb/ + // do copy + dstOldSha, dstNewSha, srcParentOldSha, srcParentTree, err := d.copyWithoutRenewTree(srcObj, dstDir) + if err != nil { + return err + } + + // delete src object and create new tree + var srcNewTree *TreeObjReq = nil + for _, t := range srcParentTree.Trees { + if t.Path == srcObj.GetName() { + srcNewTree = &t.TreeObjReq + srcNewTree.Sha = nil + break + } + } + if srcNewTree == nil { + return errs.ObjectNotFound + } + delSrcTree := make([]interface{}, 0, 2) + delSrcTree = append(delSrcTree, *srcNewTree) + if len(srcParentTree.Trees) == 1 { + delSrcTree = append(delSrcTree, map[string]string{ + "path": ".gitkeep", + "mode": "100644", + "type": "blob", + "content": "", + }) + } + srcParentNewSha, err := d.newTree(srcParentOldSha, delSrcTree) + if err != nil { + return err + } + + // renew but the common ancestor of srcPath and dstPath 
+ ancestor, srcChildName, dstChildName, _, _ := getPathCommonAncestor(srcObj.GetPath(), dstDir.GetPath()) + dstNextTreeSha, err := d.renewParentTrees(dstDir.GetPath(), dstOldSha, dstNewSha, stdpath.Join(ancestor, dstChildName)) + if err != nil { + return err + } + srcNextTreeSha, err := d.renewParentTrees(stdpath.Dir(srcObj.GetPath()), srcParentOldSha, srcParentNewSha, stdpath.Join(ancestor, srcChildName)) + if err != nil { + return err + } + + // renew the tree of the last common ancestor + ancestorTree, ancestorOldSha, err := d.getTreeDirectly(ancestor) + if err != nil { + return err + } + newTree := make([]interface{}, 2) + srcBind := false + dstBind := false + for _, t := range ancestorTree.Trees { + if t.Path == srcChildName { + t.Sha = srcNextTreeSha + newTree[0] = t.TreeObjReq + srcBind = true + } + if t.Path == dstChildName { + t.Sha = dstNextTreeSha + newTree[1] = t.TreeObjReq + dstBind = true + } + if srcBind && dstBind { + break + } + } + if !srcBind || !dstBind { + return errs.ObjectNotFound + } + ancestorNewSha, err := d.newTree(ancestorOldSha, newTree) + if err != nil { + return err + } + // renew until root + rootSha, err = d.renewParentTrees(ancestor, ancestorOldSha, ancestorNewSha, "/") + if err != nil { + return err + } + } + + // commit + message, err := getMessage(d.moveMsgTmpl, &MessageTemplateVars{ + UserName: getUsername(ctx), + ObjName: srcObj.GetName(), + ObjPath: srcObj.GetPath(), + ParentName: stdpath.Base(stdpath.Dir(srcObj.GetPath())), + ParentPath: stdpath.Dir(srcObj.GetPath()), + TargetName: stdpath.Base(dstDir.GetPath()), + TargetPath: dstDir.GetPath(), + }, "move") + if err != nil { + return err + } + return d.commit(message, rootSha) +} + +func (d *Github) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + if !d.isOnBranch { + return errors.New("cannot write to non-branch reference") + } + d.commitMutex.Lock() + defer d.commitMutex.Unlock() + parentDir := stdpath.Dir(srcObj.GetPath()) + tree, _, err := 
d.getTreeDirectly(parentDir) + if err != nil { + return err + } + newTree := make([]interface{}, 2) + operated := false + for _, t := range tree.Trees { + if t.Path == srcObj.GetName() { + if t.Type == "commit" { + return errors.New("cannot rename a submodule") + } + delCopy := t.TreeObjReq + delCopy.Sha = nil + newTree[0] = delCopy + t.Path = newName + newTree[1] = t.TreeObjReq + operated = true + break + } + } + if !operated { + return errs.ObjectNotFound + } + newSha, err := d.newTree(tree.Sha, newTree) + if err != nil { + return err + } + rootSha, err := d.renewParentTrees(parentDir, tree.Sha, newSha, "/") + if err != nil { + return err + } + message, err := getMessage(d.renameMsgTmpl, &MessageTemplateVars{ + UserName: getUsername(ctx), + ObjName: srcObj.GetName(), + ObjPath: srcObj.GetPath(), + ParentName: stdpath.Base(parentDir), + ParentPath: parentDir, + TargetName: newName, + TargetPath: stdpath.Join(parentDir, newName), + }, "rename") + if err != nil { + return err + } + return d.commit(message, rootSha) +} + +func (d *Github) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + if !d.isOnBranch { + return errors.New("cannot write to non-branch reference") + } + if strings.HasPrefix(dstDir.GetPath(), srcObj.GetPath()) { + return errors.New("cannot copy parent dir to child") + } + d.commitMutex.Lock() + defer d.commitMutex.Unlock() + + dstSha, newSha, _, _, err := d.copyWithoutRenewTree(srcObj, dstDir) + if err != nil { + return err + } + rootSha, err := d.renewParentTrees(dstDir.GetPath(), dstSha, newSha, "/") + if err != nil { + return err + } + message, err := getMessage(d.copyMsgTmpl, &MessageTemplateVars{ + UserName: getUsername(ctx), + ObjName: srcObj.GetName(), + ObjPath: srcObj.GetPath(), + ParentName: stdpath.Base(stdpath.Dir(srcObj.GetPath())), + ParentPath: stdpath.Dir(srcObj.GetPath()), + TargetName: stdpath.Base(dstDir.GetPath()), + TargetPath: dstDir.GetPath(), + }, "copy") + if err != nil { + return err + } + return 
d.commit(message, rootSha) +} + +func (d *Github) Remove(ctx context.Context, obj model.Obj) error { + if !d.isOnBranch { + return errors.New("cannot write to non-branch reference") + } + d.commitMutex.Lock() + defer d.commitMutex.Unlock() + parentDir := stdpath.Dir(obj.GetPath()) + tree, treeSha, err := d.getTreeDirectly(parentDir) + if err != nil { + return err + } + var del *TreeObjReq = nil + for _, t := range tree.Trees { + if t.Path == obj.GetName() { + if t.Type == "commit" { + return errors.New("cannot remove a submodule") + } + del = &t.TreeObjReq + del.Sha = nil + break + } + } + if del == nil { + return errs.ObjectNotFound + } + newTree := make([]interface{}, 0, 2) + newTree = append(newTree, *del) + if len(tree.Trees) == 1 { // completely emptying the repository will get a 404 + newTree = append(newTree, map[string]string{ + "path": ".gitkeep", + "mode": "100644", + "type": "blob", + "content": "", + }) + } + newSha, err := d.newTree(treeSha, newTree) + if err != nil { + return err + } + rootSha, err := d.renewParentTrees(parentDir, treeSha, newSha, "/") + if err != nil { + return err + } + commitMessage, err := getMessage(d.deleteMsgTmpl, &MessageTemplateVars{ + UserName: getUsername(ctx), + ObjName: obj.GetName(), + ObjPath: obj.GetPath(), + ParentName: stdpath.Base(parentDir), + ParentPath: parentDir, + }, "remove") + if err != nil { + return err + } + return d.commit(commitMessage, rootSha) +} + +func (d *Github) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { + if !d.isOnBranch { + return errors.New("cannot write to non-branch reference") + } + blob, err := d.putBlob(ctx, stream, up) + if err != nil { + return err + } + d.commitMutex.Lock() + defer d.commitMutex.Unlock() + parent, err := d.get(dstDir.GetPath()) + if err != nil { + return err + } + if parent.Entries == nil { + return errs.NotFolder + } + newTree := make([]interface{}, 0, 2) + newTree = append(newTree, TreeObjReq{ + Path: 
stream.GetName(), + Mode: "100644", + Type: "blob", + Sha: blob, + }) + if len(parent.Entries) == 1 && parent.Entries[0].Name == ".gitkeep" { + newTree = append(newTree, TreeObjReq{ + Path: ".gitkeep", + Mode: "100644", + Type: "blob", + Sha: nil, + }) + } + newSha, err := d.newTree(parent.Sha, newTree) + if err != nil { + return err + } + rootSha, err := d.renewParentTrees(dstDir.GetPath(), parent.Sha, newSha, "/") + if err != nil { + return err + } + + commitMessage, err := getMessage(d.putMsgTmpl, &MessageTemplateVars{ + UserName: getUsername(ctx), + ObjName: stream.GetName(), + ObjPath: stdpath.Join(dstDir.GetPath(), stream.GetName()), + ParentName: dstDir.GetName(), + ParentPath: dstDir.GetPath(), + }, "upload") + if err != nil { + return err + } + return d.commit(commitMessage, rootSha) +} + +var _ driver.Driver = (*Github)(nil) + +func (d *Github) getContentApiUrl(path string) string { + path = utils.FixAndCleanPath(path) + return fmt.Sprintf("https://api.github.com/repos/%s/%s/contents%s", d.Owner, d.Repo, path) +} + +func (d *Github) get(path string) (*Object, error) { + res, err := d.client.R().SetQueryParam("ref", d.Ref).Get(d.getContentApiUrl(path)) + if err != nil { + return nil, err + } + if res.StatusCode() != 200 { + return nil, toErr(res) + } + var resp Object + err = utils.Json.Unmarshal(res.Body(), &resp) + return &resp, err +} + +func (d *Github) createGitKeep(path, message string) error { + body := map[string]interface{}{ + "message": message, + "content": "", + "branch": d.Ref, + } + d.addCommitterAndAuthor(&body) + + res, err := d.client.R().SetBody(body).Put(d.getContentApiUrl(stdpath.Join(path, ".gitkeep"))) + if err != nil { + return err + } + if res.StatusCode() != 200 && res.StatusCode() != 201 { + return toErr(res) + } + return nil +} + +func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) (string, error) { + beforeContent := "{\"encoding\":\"base64\",\"content\":\"" + afterContent := "\"}" 
+ length := int64(len(beforeContent)) + calculateBase64Length(stream.GetSize()) + int64(len(afterContent)) + beforeContentReader := strings.NewReader(beforeContent) + contentReader, contentWriter := io.Pipe() + go func() { + encoder := base64.NewEncoder(base64.StdEncoding, contentWriter) + if _, err := io.Copy(encoder, stream); err != nil { + _ = contentWriter.CloseWithError(err) + return + } + _ = encoder.Close() + _ = contentWriter.Close() + }() + afterContentReader := strings.NewReader(afterContent) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, + fmt.Sprintf("https://api.github.com/repos/%s/%s/git/blobs", d.Owner, d.Repo), + &ReaderWithProgress{ + Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader), + Length: length, + Progress: up, + }) + if err != nil { + return "", err + } + req.Header.Set("Accept", "application/vnd.github+json") + req.Header.Set("Authorization", "Bearer "+d.Token) + req.Header.Set("X-GitHub-Api-Version", "2022-11-28") + req.ContentLength = length + + res, err := base.HttpClient.Do(req) + if err != nil { + return "", err + } + resBody, err := io.ReadAll(res.Body) + if err != nil { + return "", err + } + if res.StatusCode != 201 { + var errMsg ErrResp + if err = utils.Json.Unmarshal(resBody, &errMsg); err != nil { + return "", errors.New(res.Status) + } else { + return "", fmt.Errorf("%s: %s", res.Status, errMsg.Message) + } + } + var resp PutBlobResp + if err = utils.Json.Unmarshal(resBody, &resp); err != nil { + return "", err + } + return resp.Sha, nil +} + +func (d *Github) delete(path, sha, message string) error { + body := map[string]interface{}{ + "message": message, + "sha": sha, + "branch": d.Ref, + } + d.addCommitterAndAuthor(&body) + res, err := d.client.R().SetBody(body).Delete(d.getContentApiUrl(path)) + if err != nil { + return err + } + if res.StatusCode() != 200 { + return toErr(res) + } + return nil +} + +func (d *Github) renewParentTrees(path, prevSha, curSha, until string) (string, 
error) { + for path != until { + path = stdpath.Dir(path) + tree, sha, err := d.getTreeDirectly(path) + if err != nil { + return "", err + } + var newTree *TreeObjReq = nil + for _, t := range tree.Trees { + if t.Sha == prevSha { + newTree = &t.TreeObjReq + newTree.Sha = curSha + break + } + } + if newTree == nil { + return "", errs.ObjectNotFound + } + curSha, err = d.newTree(sha, []interface{}{*newTree}) + if err != nil { + return "", err + } + prevSha = sha + } + return curSha, nil +} + +func (d *Github) getTree(sha string) (*TreeResp, error) { + res, err := d.client.R().Get(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/trees/%s", d.Owner, d.Repo, sha)) + if err != nil { + return nil, err + } + if res.StatusCode() != 200 { + return nil, toErr(res) + } + var resp TreeResp + if err = utils.Json.Unmarshal(res.Body(), &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (d *Github) getTreeDirectly(path string) (*TreeResp, string, error) { + p, err := d.get(path) + if err != nil { + return nil, "", err + } + if p.Entries == nil { + return nil, "", fmt.Errorf("%s is not a folder", path) + } + tree, err := d.getTree(p.Sha) + if err != nil { + return nil, "", err + } + if tree.Truncated { + return nil, "", fmt.Errorf("tree %s is truncated", path) + } + return tree, p.Sha, nil +} + +func (d *Github) newTree(baseSha string, tree []interface{}) (string, error) { + res, err := d.client.R(). + SetBody(&TreeReq{ + BaseTree: baseSha, + Trees: tree, + }). 
+ Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/trees", d.Owner, d.Repo)) + if err != nil { + return "", err + } + if res.StatusCode() != 201 { + return "", toErr(res) + } + var resp TreeResp + if err = utils.Json.Unmarshal(res.Body(), &resp); err != nil { + return "", err + } + return resp.Sha, nil +} + +func (d *Github) commit(message, treeSha string) error { + oldCommit, err := d.getBranchHead() + body := map[string]interface{}{ + "message": message, + "tree": treeSha, + "parents": []string{oldCommit}, + } + d.addCommitterAndAuthor(&body) + res, err := d.client.R().SetBody(body).Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/commits", d.Owner, d.Repo)) + if err != nil { + return err + } + if res.StatusCode() != 201 { + return toErr(res) + } + var resp CommitResp + if err = utils.Json.Unmarshal(res.Body(), &resp); err != nil { + return err + } + + // update branch head + res, err = d.client.R(). + SetBody(&UpdateRefReq{ + Sha: resp.Sha, + Force: false, + }). + Patch(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/refs/heads/%s", d.Owner, d.Repo, d.Ref)) + if err != nil { + return err + } + if res.StatusCode() != 200 { + return toErr(res) + } + return nil +} + +func (d *Github) getBranchHead() (string, error) { + res, err := d.client.R().Get(fmt.Sprintf("https://api.github.com/repos/%s/%s/branches/%s", d.Owner, d.Repo, d.Ref)) + if err != nil { + return "", err + } + if res.StatusCode() != 200 { + return "", toErr(res) + } + var resp BranchResp + if err = utils.Json.Unmarshal(res.Body(), &resp); err != nil { + return "", err + } + return resp.Commit.Sha, nil +} + +func (d *Github) copyWithoutRenewTree(srcObj, dstDir model.Obj) (dstSha, newSha, srcParentSha string, srcParentTree *TreeResp, err error) { + dst, err := d.get(dstDir.GetPath()) + if err != nil { + return "", "", "", nil, err + } + if dst.Entries == nil { + return "", "", "", nil, errs.NotFolder + } + dstSha = dst.Sha + srcParentPath := stdpath.Dir(srcObj.GetPath()) + 
srcParentTree, srcParentSha, err = d.getTreeDirectly(srcParentPath) + if err != nil { + return "", "", "", nil, err + } + var src *TreeObjReq = nil + for _, t := range srcParentTree.Trees { + if t.Path == srcObj.GetName() { + if t.Type == "commit" { + return "", "", "", nil, errors.New("cannot copy a submodule") + } + src = &t.TreeObjReq + break + } + } + if src == nil { + return "", "", "", nil, errs.ObjectNotFound + } + + newTree := make([]interface{}, 0, 2) + newTree = append(newTree, *src) + if len(dst.Entries) == 1 && dst.Entries[0].Name == ".gitkeep" { + newTree = append(newTree, TreeObjReq{ + Path: ".gitkeep", + Mode: "100644", + Type: "blob", + Sha: nil, + }) + } + newSha, err = d.newTree(dstSha, newTree) + if err != nil { + return "", "", "", nil, err + } + return dstSha, newSha, srcParentSha, srcParentTree, nil +} + +func (d *Github) getRepo() (*RepoResp, error) { + res, err := d.client.R().Get(fmt.Sprintf("https://api.github.com/repos/%s/%s", d.Owner, d.Repo)) + if err != nil { + return nil, err + } + if res.StatusCode() != 200 { + return nil, toErr(res) + } + var resp RepoResp + if err = utils.Json.Unmarshal(res.Body(), &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (d *Github) addCommitterAndAuthor(m *map[string]interface{}) { + if d.CommitterName != "" { + committer := map[string]string{ + "name": d.CommitterName, + "email": d.CommitterEmail, + } + (*m)["committer"] = committer + } + if d.AuthorName != "" { + author := map[string]string{ + "name": d.AuthorName, + "email": d.AuthorEmail, + } + (*m)["author"] = author + } +} diff --git a/drivers/github/meta.go b/drivers/github/meta.go new file mode 100644 index 00000000..0df4aa60 --- /dev/null +++ b/drivers/github/meta.go @@ -0,0 +1,36 @@ +package github + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + driver.RootPath + Token string `json:"token" type:"string" required:"true"` + Owner 
string `json:"owner" type:"string" required:"true"` + Repo string `json:"repo" type:"string" required:"true"` + Ref string `json:"ref" type:"string" help:"A branch, a tag or a commit SHA, main branch by default."` + CommitterName string `json:"committer_name" type:"string"` + CommitterEmail string `json:"committer_email" type:"string"` + AuthorName string `json:"author_name" type:"string"` + AuthorEmail string `json:"author_email" type:"string"` + MkdirCommitMsg string `json:"mkdir_commit_message" type:"text" default:"{{.UserName}} mkdir {{.ObjPath}}"` + DeleteCommitMsg string `json:"delete_commit_message" type:"text" default:"{{.UserName}} remove {{.ObjPath}}"` + PutCommitMsg string `json:"put_commit_message" type:"text" default:"{{.UserName}} upload {{.ObjPath}}"` + RenameCommitMsg string `json:"rename_commit_message" type:"text" default:"{{.UserName}} rename {{.ObjPath}} to {{.TargetName}}"` + CopyCommitMsg string `json:"copy_commit_message" type:"text" default:"{{.UserName}} copy {{.ObjPath}} to {{.TargetPath}}"` + MoveCommitMsg string `json:"move_commit_message" type:"text" default:"{{.UserName}} move {{.ObjPath}} to {{.TargetPath}}"` +} + +var config = driver.Config{ + Name: "GitHub API", + LocalSort: true, + DefaultRoot: "/", +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &Github{} + }) +} diff --git a/drivers/github/types.go b/drivers/github/types.go new file mode 100644 index 00000000..425f8979 --- /dev/null +++ b/drivers/github/types.go @@ -0,0 +1,102 @@ +package github + +import ( + "github.com/alist-org/alist/v3/internal/model" + "time" +) + +type Links struct { + Git string `json:"git"` + Html string `json:"html"` + Self string `json:"self"` +} + +type Object struct { + Type string `json:"type"` + Encoding string `json:"encoding" required:"false"` + Size int64 `json:"size"` + Name string `json:"name"` + Path string `json:"path"` + Content string `json:"Content" required:"false"` + Sha string `json:"sha"` + URL string 
`json:"url"` + GitURL string `json:"git_url"` + HtmlURL string `json:"html_url"` + DownloadURL string `json:"download_url"` + Entries []Object `json:"entries" required:"false"` + Links Links `json:"_links"` + SubmoduleGitURL string `json:"submodule_git_url" required:"false"` + Target string `json:"target" required:"false"` +} + +func (o *Object) toModelObj() *model.Object { + return &model.Object{ + Name: o.Name, + Size: o.Size, + Modified: time.Unix(0, 0), + IsFolder: o.Type == "dir", + } +} + +type PutBlobResp struct { + URL string `json:"url"` + Sha string `json:"sha"` +} + +type ErrResp struct { + Message string `json:"message"` + DocumentationURL string `json:"documentation_url"` + Status string `json:"status"` +} + +type TreeObjReq struct { + Path string `json:"path"` + Mode string `json:"mode"` + Type string `json:"type"` + Sha interface{} `json:"sha"` +} + +type TreeObjResp struct { + TreeObjReq + Size int64 `json:"size" required:"false"` + URL string `json:"url"` +} + +func (o *TreeObjResp) toModelObj() *model.Object { + return &model.Object{ + Name: o.Path, + Size: o.Size, + Modified: time.Unix(0, 0), + IsFolder: o.Type == "tree", + } +} + +type TreeResp struct { + Sha string `json:"sha"` + URL string `json:"url"` + Trees []TreeObjResp `json:"tree"` + Truncated bool `json:"truncated"` +} + +type TreeReq struct { + BaseTree string `json:"base_tree"` + Trees []interface{} `json:"tree"` +} + +type CommitResp struct { + Sha string `json:"sha"` +} + +type BranchResp struct { + Name string `json:"name"` + Commit CommitResp `json:"commit"` +} + +type UpdateRefReq struct { + Sha string `json:"sha"` + Force bool `json:"force"` +} + +type RepoResp struct { + DefaultBranch string `json:"default_branch"` +} diff --git a/drivers/github/util.go b/drivers/github/util.go new file mode 100644 index 00000000..1e7f7fdb --- /dev/null +++ b/drivers/github/util.go @@ -0,0 +1,115 @@ +package github + +import ( + "context" + "errors" + "fmt" + 
"github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/go-resty/resty/v2" + "io" + "math" + "strings" + "text/template" +) + +type ReaderWithProgress struct { + Reader io.Reader + Length int64 + Progress func(percentage float64) + offset int64 +} + +func (r *ReaderWithProgress) Read(p []byte) (int, error) { + n, err := r.Reader.Read(p) + r.offset += int64(n) + r.Progress(math.Min(100.0, float64(r.offset)/float64(r.Length)*100.0)) + return n, err +} + +type MessageTemplateVars struct { + UserName string + ObjName string + ObjPath string + ParentName string + ParentPath string + TargetName string + TargetPath string +} + +func getMessage(tmpl *template.Template, vars *MessageTemplateVars, defaultOpStr string) (string, error) { + sb := strings.Builder{} + if err := tmpl.Execute(&sb, vars); err != nil { + return fmt.Sprintf("%s %s %s", vars.UserName, defaultOpStr, vars.ObjPath), err + } + return sb.String(), nil +} + +func calculateBase64Length(inputLength int64) int64 { + return 4 * ((inputLength + 2) / 3) +} + +func toErr(res *resty.Response) error { + var errMsg ErrResp + if err := utils.Json.Unmarshal(res.Body(), &errMsg); err != nil { + return errors.New(res.Status()) + } else { + return fmt.Errorf("%s: %s", res.Status(), errMsg.Message) + } +} + +// Example input: +// a = /aaa/bbb/ccc +// b = /aaa/b11/ddd/ccc +// +// Output: +// ancestor = /aaa +// aChildName = bbb +// bChildName = b11 +// aRest = bbb/ccc +// bRest = b11/ddd/ccc +func getPathCommonAncestor(a, b string) (ancestor, aChildName, bChildName, aRest, bRest string) { + a = utils.FixAndCleanPath(a) + b = utils.FixAndCleanPath(b) + idx := 1 + for idx < len(a) && idx < len(b) { + if a[idx] != b[idx] { + break + } + idx++ + } + aNextIdx := idx + for aNextIdx < len(a) { + if a[aNextIdx] == '/' { + break + } + aNextIdx++ + } + bNextIdx := idx + for bNextIdx < len(b) { + if b[bNextIdx] == '/' { + break + } + bNextIdx++ + } + for idx > 0 { + if a[idx] == '/' { + 
break + } + idx-- + } + ancestor = utils.FixAndCleanPath(a[:idx]) + aChildName = a[idx+1 : aNextIdx] + bChildName = b[idx+1 : bNextIdx] + aRest = a[idx+1:] + bRest = b[idx+1:] + return ancestor, aChildName, bChildName, aRest, bRest +} + +func getUsername(ctx context.Context) string { + user, ok := ctx.Value("user").(*model.User) + if !ok { + return "" + } + return user.Username +} From b60da9732f22b22d84f015d6aaabcb2f058871d1 Mon Sep 17 00:00:00 2001 From: Jealous Date: Fri, 10 Jan 2025 21:24:44 +0800 Subject: [PATCH 081/187] feat(offline-download): allow using offline download tools in any storage (#7716) * Feat(offline-download): allow using thunder offline download tool in any storage * Feat(offline-download): allow using 115 offline download tool in any storage * Feat(offline-download): allow using pikpak offline download tool in any storage * style(offline-download): unify offline download tool names * feat(offline-download): show available offline download tools only * Fix(offline-download): update unmodified tool names. 
--------- Co-authored-by: Andy Hsu --- internal/conf/const.go | 9 + internal/offline_download/115/client.go | 23 +- internal/offline_download/pikpak/pikpak.go | 25 +- internal/offline_download/thunder/thunder.go | 25 +- internal/offline_download/tool/add.go | 33 ++- internal/offline_download/tool/all_test.go | 17 -- internal/offline_download/tool/base.go | 29 -- internal/offline_download/tool/download.go | 55 +--- internal/offline_download/tool/tools.go | 6 +- internal/offline_download/tool/transfer.go | 279 ++++++++++++++---- internal/offline_download/tool/util.go | 41 --- .../offline_download/transmission/client.go | 2 +- server/handles/offline_download.go | 147 ++++++++- server/router.go | 3 + 14 files changed, 484 insertions(+), 210 deletions(-) delete mode 100644 internal/offline_download/tool/all_test.go delete mode 100644 internal/offline_download/tool/util.go diff --git a/internal/conf/const.go b/internal/conf/const.go index 99e8c868..0e534350 100644 --- a/internal/conf/const.go +++ b/internal/conf/const.go @@ -58,6 +58,15 @@ const ( TransmissionUri = "transmission_uri" TransmissionSeedtime = "transmission_seedtime" + // 115 + Pan115TempDir = "115_temp_dir" + + // pikpak + PikPakTempDir = "pikpak_temp_dir" + + // thunder + ThunderTempDir = "thunder_temp_dir" + // single Token = "token" IndexProgress = "index_progress" diff --git a/internal/offline_download/115/client.go b/internal/offline_download/115/client.go index 45f147db..3f9d804d 100644 --- a/internal/offline_download/115/client.go +++ b/internal/offline_download/115/client.go @@ -3,6 +3,8 @@ package _115 import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/setting" "github.com/alist-org/alist/v3/drivers/115" "github.com/alist-org/alist/v3/internal/errs" @@ -33,13 +35,23 @@ func (p *Cloud115) Init() (string, error) { } func (p *Cloud115) IsReady() bool { + tempDir := setting.GetStr(conf.Pan115TempDir) + if tempDir == "" { + return false + } 
+ storage, _, err := op.GetStorageAndActualPath(tempDir) + if err != nil { + return false + } + if _, ok := storage.(*_115.Pan115); !ok { + return false + } return true } func (p *Cloud115) AddURL(args *tool.AddUrlArgs) (string, error) { // 添加新任务刷新缓存 p.refreshTaskCache = true - // args.TempDir 已经被修改为了 DstDirPath storage, actualPath, err := op.GetStorageAndActualPath(args.TempDir) if err != nil { return "", err @@ -50,6 +62,11 @@ func (p *Cloud115) AddURL(args *tool.AddUrlArgs) (string, error) { } ctx := context.Background() + + if err := op.MakeDir(ctx, storage, actualPath); err != nil { + return "", err + } + parentDir, err := op.GetUnwrap(ctx, storage, actualPath) if err != nil { return "", err @@ -64,7 +81,7 @@ func (p *Cloud115) AddURL(args *tool.AddUrlArgs) (string, error) { } func (p *Cloud115) Remove(task *tool.DownloadTask) error { - storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + storage, _, err := op.GetStorageAndActualPath(task.TempDir) if err != nil { return err } @@ -81,7 +98,7 @@ func (p *Cloud115) Remove(task *tool.DownloadTask) error { } func (p *Cloud115) Status(task *tool.DownloadTask) (*tool.Status, error) { - storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + storage, _, err := op.GetStorageAndActualPath(task.TempDir) if err != nil { return nil, err } diff --git a/internal/offline_download/pikpak/pikpak.go b/internal/offline_download/pikpak/pikpak.go index f07b3de8..8fdfb340 100644 --- a/internal/offline_download/pikpak/pikpak.go +++ b/internal/offline_download/pikpak/pikpak.go @@ -3,6 +3,8 @@ package pikpak import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/setting" "strconv" "github.com/alist-org/alist/v3/drivers/pikpak" @@ -17,7 +19,7 @@ type PikPak struct { } func (p *PikPak) Name() string { - return "pikpak" + return "PikPak" } func (p *PikPak) Items() []model.SettingItem { @@ -34,13 +36,23 @@ func (p *PikPak) Init() (string, error) { } func (p 
*PikPak) IsReady() bool { + tempDir := setting.GetStr(conf.PikPakTempDir) + if tempDir == "" { + return false + } + storage, _, err := op.GetStorageAndActualPath(tempDir) + if err != nil { + return false + } + if _, ok := storage.(*pikpak.PikPak); !ok { + return false + } return true } func (p *PikPak) AddURL(args *tool.AddUrlArgs) (string, error) { // 添加新任务刷新缓存 p.refreshTaskCache = true - // args.TempDir 已经被修改为了 DstDirPath storage, actualPath, err := op.GetStorageAndActualPath(args.TempDir) if err != nil { return "", err @@ -51,6 +63,11 @@ func (p *PikPak) AddURL(args *tool.AddUrlArgs) (string, error) { } ctx := context.Background() + + if err := op.MakeDir(ctx, storage, actualPath); err != nil { + return "", err + } + parentDir, err := op.GetUnwrap(ctx, storage, actualPath) if err != nil { return "", err @@ -65,7 +82,7 @@ func (p *PikPak) AddURL(args *tool.AddUrlArgs) (string, error) { } func (p *PikPak) Remove(task *tool.DownloadTask) error { - storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + storage, _, err := op.GetStorageAndActualPath(task.TempDir) if err != nil { return err } @@ -82,7 +99,7 @@ func (p *PikPak) Remove(task *tool.DownloadTask) error { } func (p *PikPak) Status(task *tool.DownloadTask) (*tool.Status, error) { - storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + storage, _, err := op.GetStorageAndActualPath(task.TempDir) if err != nil { return nil, err } diff --git a/internal/offline_download/thunder/thunder.go b/internal/offline_download/thunder/thunder.go index 3ab8b002..81b94861 100644 --- a/internal/offline_download/thunder/thunder.go +++ b/internal/offline_download/thunder/thunder.go @@ -4,6 +4,8 @@ import ( "context" "errors" "fmt" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/setting" "strconv" "github.com/alist-org/alist/v3/drivers/thunder" @@ -18,7 +20,7 @@ type Thunder struct { } func (t *Thunder) Name() string { - return "thunder" + return "Thunder" } func (t 
*Thunder) Items() []model.SettingItem { @@ -35,13 +37,23 @@ func (t *Thunder) Init() (string, error) { } func (t *Thunder) IsReady() bool { + tempDir := setting.GetStr(conf.ThunderTempDir) + if tempDir == "" { + return false + } + storage, _, err := op.GetStorageAndActualPath(tempDir) + if err != nil { + return false + } + if _, ok := storage.(*thunder.Thunder); !ok { + return false + } return true } func (t *Thunder) AddURL(args *tool.AddUrlArgs) (string, error) { // 添加新任务刷新缓存 t.refreshTaskCache = true - // args.TempDir 已经被修改为了 DstDirPath storage, actualPath, err := op.GetStorageAndActualPath(args.TempDir) if err != nil { return "", err @@ -52,6 +64,11 @@ func (t *Thunder) AddURL(args *tool.AddUrlArgs) (string, error) { } ctx := context.Background() + + if err := op.MakeDir(ctx, storage, actualPath); err != nil { + return "", err + } + parentDir, err := op.GetUnwrap(ctx, storage, actualPath) if err != nil { return "", err @@ -66,7 +83,7 @@ func (t *Thunder) AddURL(args *tool.AddUrlArgs) (string, error) { } func (t *Thunder) Remove(task *tool.DownloadTask) error { - storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + storage, _, err := op.GetStorageAndActualPath(task.TempDir) if err != nil { return err } @@ -83,7 +100,7 @@ func (t *Thunder) Remove(task *tool.DownloadTask) error { } func (t *Thunder) Status(task *tool.DownloadTask) (*tool.Status, error) { - storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + storage, _, err := op.GetStorageAndActualPath(task.TempDir) if err != nil { return nil, err } diff --git a/internal/offline_download/tool/add.go b/internal/offline_download/tool/add.go index 405f96cb..884e166b 100644 --- a/internal/offline_download/tool/add.go +++ b/internal/offline_download/tool/add.go @@ -2,8 +2,12 @@ package tool import ( "context" + _115 "github.com/alist-org/alist/v3/drivers/115" + "github.com/alist-org/alist/v3/drivers/pikpak" + "github.com/alist-org/alist/v3/drivers/thunder" 
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/setting" "github.com/alist-org/alist/v3/internal/task" "net/url" "path" @@ -76,19 +80,26 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro tempDir := filepath.Join(conf.Conf.TempDir, args.Tool, uid) deletePolicy := args.DeletePolicy + // 如果当前 storage 是对应网盘,则直接下载到目标路径,无需转存 switch args.Tool { case "115 Cloud": - tempDir = args.DstDirPath - // 防止将下载好的文件删除 - deletePolicy = DeleteNever - case "pikpak": - tempDir = args.DstDirPath - // 防止将下载好的文件删除 - deletePolicy = DeleteNever - case "thunder": - tempDir = args.DstDirPath - // 防止将下载好的文件删除 - deletePolicy = DeleteNever + if _, ok := storage.(*_115.Pan115); ok { + tempDir = args.DstDirPath + } else { + tempDir = filepath.Join(setting.GetStr(conf.Pan115TempDir), uid) + } + case "PikPak": + if _, ok := storage.(*pikpak.PikPak); ok { + tempDir = args.DstDirPath + } else { + tempDir = filepath.Join(setting.GetStr(conf.PikPakTempDir), uid) + } + case "Thunder": + if _, ok := storage.(*thunder.Thunder); ok { + tempDir = args.DstDirPath + } else { + tempDir = filepath.Join(setting.GetStr(conf.ThunderTempDir), uid) + } } taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed diff --git a/internal/offline_download/tool/all_test.go b/internal/offline_download/tool/all_test.go deleted file mode 100644 index 27da5e32..00000000 --- a/internal/offline_download/tool/all_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package tool_test - -import ( - "testing" - - "github.com/alist-org/alist/v3/internal/offline_download/tool" -) - -func TestGetFiles(t *testing.T) { - files, err := tool.GetFiles("..") - if err != nil { - t.Fatal(err) - } - for _, file := range files { - t.Log(file.Name, file.Size, file.Path, file.Modified) - } -} diff --git a/internal/offline_download/tool/base.go b/internal/offline_download/tool/base.go index ae9eac26..b14169f8 
100644 --- a/internal/offline_download/tool/base.go +++ b/internal/offline_download/tool/base.go @@ -1,10 +1,6 @@ package tool import ( - "io" - "os" - "time" - "github.com/alist-org/alist/v3/internal/model" ) @@ -40,28 +36,3 @@ type Tool interface { // Run for simple http download Run(task *DownloadTask) error } - -type GetFileser interface { - // GetFiles return the files of the download task, if nil, means walk the temp dir to get the files - GetFiles(task *DownloadTask) []File -} - -type File struct { - // ReadCloser for http client - ReadCloser io.ReadCloser - Name string - Size int64 - Path string - Modified time.Time -} - -func (f *File) GetReadCloser() (io.ReadCloser, error) { - if f.ReadCloser != nil { - return f.ReadCloser, nil - } - file, err := os.Open(f.Path) - if err != nil { - return nil, err - } - return file, nil -} diff --git a/internal/offline_download/tool/download.go b/internal/offline_download/tool/download.go index 94bf7dbb..c3b30f1b 100644 --- a/internal/offline_download/tool/download.go +++ b/internal/offline_download/tool/download.go @@ -40,7 +40,7 @@ func (t *DownloadTask) Run() error { } if err := t.tool.Run(t); !errs.IsNotSupportError(err) { if err == nil { - return t.Complete() + return t.Transfer() } return err } @@ -80,10 +80,10 @@ outer: if err != nil { return err } - if t.tool.Name() == "pikpak" { + if t.tool.Name() == "PikPak" { return nil } - if t.tool.Name() == "thunder" { + if t.tool.Name() == "Thunder" { return nil } if t.tool.Name() == "115 Cloud" { @@ -109,7 +109,7 @@ outer: } } - if t.tool.Name() == "transmission" { + if t.tool.Name() == "Transmission" { // hack for transmission seedTime := setting.GetInt(conf.TransmissionSeedtime, 0) if seedTime >= 0 { @@ -146,7 +146,7 @@ func (t *DownloadTask) Update() (bool, error) { } // if download completed if info.Completed { - err := t.Complete() + err := t.Transfer() return true, errors.WithMessage(err, "failed to transfer file") } // if download failed @@ -156,45 +156,16 @@ func 
(t *DownloadTask) Update() (bool, error) { return false, nil } -func (t *DownloadTask) Complete() error { - var ( - files []File - err error - ) - if t.tool.Name() == "pikpak" { - return nil - } - if t.tool.Name() == "thunder" { - return nil - } - if t.tool.Name() == "115 Cloud" { - return nil - } - if getFileser, ok := t.tool.(GetFileser); ok { - files = getFileser.GetFiles(t) - } else { - files, err = GetFiles(t.TempDir) - if err != nil { - return errors.Wrapf(err, "failed to get files") +func (t *DownloadTask) Transfer() error { + toolName := t.tool.Name() + if toolName == "115 Cloud" || toolName == "PikPak" || toolName == "Thunder" { + // 如果不是直接下载到目标路径,则进行转存 + if t.TempDir != t.DstDirPath { + return transferObj(t.Ctx(), t.TempDir, t.DstDirPath, t.DeletePolicy) } + return nil } - // upload files - for i := range files { - file := files[i] - tsk := &TransferTask{ - TaskExtension: task.TaskExtension{ - Creator: t.GetCreator(), - }, - file: file, - DstDirPath: t.DstDirPath, - TempDir: t.TempDir, - DeletePolicy: t.DeletePolicy, - FileDir: file.Path, - } - tsk.SetTotalBytes(file.Size) - TransferTaskManager.Add(tsk) - } - return nil + return transferStd(t.Ctx(), t.TempDir, t.DstDirPath, t.DeletePolicy) } func (t *DownloadTask) GetName() string { diff --git a/internal/offline_download/tool/tools.go b/internal/offline_download/tool/tools.go index 9de7d526..4a31ac7f 100644 --- a/internal/offline_download/tool/tools.go +++ b/internal/offline_download/tool/tools.go @@ -3,6 +3,7 @@ package tool import ( "fmt" "github.com/alist-org/alist/v3/internal/model" + "sort" ) var ( @@ -25,8 +26,11 @@ func (t ToolsManager) Add(tool Tool) { func (t ToolsManager) Names() []string { names := make([]string, 0, len(t)) for name := range t { - names = append(names, name) + if tool, err := t.Get(name); err == nil && tool.IsReady() { + names = append(names, name) + } } + sort.Strings(names) return names } diff --git a/internal/offline_download/tool/transfer.go 
b/internal/offline_download/tool/transfer.go index a77c4822..8c7ab244 100644 --- a/internal/offline_download/tool/transfer.go +++ b/internal/offline_download/tool/transfer.go @@ -1,11 +1,9 @@ package tool import ( + "context" "fmt" - "os" - "path/filepath" - "time" - + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/stream" @@ -14,80 +12,60 @@ import ( "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/xhofe/tache" + "net/http" + "os" + stdpath "path" + "path/filepath" + "time" ) type TransferTask struct { task.TaskExtension - FileDir string `json:"file_dir"` - DstDirPath string `json:"dst_dir_path"` - TempDir string `json:"temp_dir"` - DeletePolicy DeletePolicy `json:"delete_policy"` - file File + Status string `json:"-"` //don't save status to save space + SrcObjPath string `json:"src_obj_path"` + DstDirPath string `json:"dst_dir_path"` + SrcStorage driver.Driver `json:"-"` + DstStorage driver.Driver `json:"-"` + SrcStorageMp string `json:"src_storage_mp"` + DstStorageMp string `json:"dst_storage_mp"` + DeletePolicy DeletePolicy `json:"delete_policy"` } func (t *TransferTask) Run() error { t.ClearEndTime() t.SetStartTime(time.Now()) defer func() { t.SetEndTime(time.Now()) }() - // check dstDir again - var err error - if (t.file == File{}) { - t.file, err = GetFile(t.FileDir) - if err != nil { - return errors.Wrapf(err, "failed to get file %s", t.FileDir) - } + if t.SrcStorage == nil { + return transferStdPath(t) + } else { + return transferObjPath(t) } - storage, dstDirActualPath, err := op.GetStorageAndActualPath(t.DstDirPath) - if err != nil { - return errors.WithMessage(err, "failed get storage") - } - mimetype := utils.GetMimeType(t.file.Path) - rc, err := t.file.GetReadCloser() - if err != nil { - return errors.Wrapf(err, "failed to open file %s", t.file.Path) - } - s := &stream.FileStream{ - Ctx: nil, - Obj: 
&model.Object{ - Name: filepath.Base(t.file.Path), - Size: t.file.Size, - Modified: t.file.Modified, - IsFolder: false, - }, - Reader: rc, - Mimetype: mimetype, - Closers: utils.NewClosers(rc), - } - relDir, err := filepath.Rel(t.TempDir, filepath.Dir(t.file.Path)) - if err != nil { - log.Errorf("find relation directory error: %v", err) - } - newDistDir := filepath.Join(dstDirActualPath, relDir) - return op.Put(t.Ctx(), storage, newDistDir, s, t.SetProgress) } func (t *TransferTask) GetName() string { - return fmt.Sprintf("transfer %s to [%s]", t.file.Path, t.DstDirPath) + return fmt.Sprintf("transfer [%s](%s) to [%s](%s)", t.SrcStorageMp, t.SrcObjPath, t.DstStorageMp, t.DstDirPath) } func (t *TransferTask) GetStatus() string { - return "transferring" + return t.Status } func (t *TransferTask) OnSucceeded() { if t.DeletePolicy == DeleteOnUploadSucceed || t.DeletePolicy == DeleteAlways { - err := os.Remove(t.file.Path) - if err != nil { - log.Errorf("failed to delete file %s, error: %s", t.file.Path, err.Error()) + if t.SrcStorage == nil { + removeStdTemp(t) + } else { + removeObjTemp(t) } } } func (t *TransferTask) OnFailed() { if t.DeletePolicy == DeleteOnUploadFailed || t.DeletePolicy == DeleteAlways { - err := os.Remove(t.file.Path) - if err != nil { - log.Errorf("failed to delete file %s, error: %s", t.file.Path, err.Error()) + if t.SrcStorage == nil { + removeStdTemp(t) + } else { + removeObjTemp(t) } } } @@ -95,3 +73,202 @@ func (t *TransferTask) OnFailed() { var ( TransferTaskManager *tache.Manager[*TransferTask] ) + +func transferStd(ctx context.Context, tempDir, dstDirPath string, deletePolicy DeletePolicy) error { + dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath) + if err != nil { + return errors.WithMessage(err, "failed get dst storage") + } + entries, err := os.ReadDir(tempDir) + if err != nil { + return err + } + taskCreator, _ := ctx.Value("user").(*model.User) + for _, entry := range entries { + t := &TransferTask{ + 
TaskExtension: task.TaskExtension{ + Creator: taskCreator, + }, + SrcObjPath: stdpath.Join(tempDir, entry.Name()), + DstDirPath: dstDirActualPath, + DstStorage: dstStorage, + DstStorageMp: dstStorage.GetStorage().MountPath, + DeletePolicy: deletePolicy, + } + TransferTaskManager.Add(t) + } + return nil +} + +func transferStdPath(t *TransferTask) error { + t.Status = "getting src object" + info, err := os.Stat(t.SrcObjPath) + if err != nil { + return err + } + if info.IsDir() { + t.Status = "src object is dir, listing objs" + entries, err := os.ReadDir(t.SrcObjPath) + if err != nil { + return err + } + for _, entry := range entries { + srcRawPath := stdpath.Join(t.SrcObjPath, entry.Name()) + dstObjPath := stdpath.Join(t.DstDirPath, info.Name()) + t := &TransferTask{ + TaskExtension: task.TaskExtension{ + Creator: t.Creator, + }, + SrcObjPath: srcRawPath, + DstDirPath: dstObjPath, + DstStorage: t.DstStorage, + SrcStorageMp: t.SrcStorageMp, + DstStorageMp: t.DstStorageMp, + DeletePolicy: t.DeletePolicy, + } + TransferTaskManager.Add(t) + } + t.Status = "src object is dir, added all transfer tasks of files" + return nil + } + return transferStdFile(t) +} + +func transferStdFile(t *TransferTask) error { + rc, err := os.Open(t.SrcObjPath) + if err != nil { + return errors.Wrapf(err, "failed to open file %s", t.SrcObjPath) + } + info, err := rc.Stat() + if err != nil { + return errors.Wrapf(err, "failed to get file %s", t.SrcObjPath) + } + mimetype := utils.GetMimeType(t.SrcObjPath) + s := &stream.FileStream{ + Ctx: nil, + Obj: &model.Object{ + Name: filepath.Base(t.SrcObjPath), + Size: info.Size(), + Modified: info.ModTime(), + IsFolder: false, + }, + Reader: rc, + Mimetype: mimetype, + Closers: utils.NewClosers(rc), + } + t.SetTotalBytes(info.Size()) + return op.Put(t.Ctx(), t.DstStorage, t.DstDirPath, s, t.SetProgress) +} + +func removeStdTemp(t *TransferTask) { + info, err := os.Stat(t.SrcObjPath) + if err != nil || info.IsDir() { + return + } + if err := 
os.Remove(t.SrcObjPath); err != nil { + log.Errorf("failed to delete temp file %s, error: %s", t.SrcObjPath, err.Error()) + } +} + +func transferObj(ctx context.Context, tempDir, dstDirPath string, deletePolicy DeletePolicy) error { + srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(tempDir) + if err != nil { + return errors.WithMessage(err, "failed get src storage") + } + dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath) + if err != nil { + return errors.WithMessage(err, "failed get dst storage") + } + objs, err := op.List(ctx, srcStorage, srcObjActualPath, model.ListArgs{}) + if err != nil { + return errors.WithMessagef(err, "failed list src [%s] objs", tempDir) + } + taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed + for _, obj := range objs { + t := &TransferTask{ + TaskExtension: task.TaskExtension{ + Creator: taskCreator, + }, + SrcObjPath: stdpath.Join(srcObjActualPath, obj.GetName()), + DstDirPath: dstDirActualPath, + SrcStorage: srcStorage, + DstStorage: dstStorage, + SrcStorageMp: srcStorage.GetStorage().MountPath, + DstStorageMp: dstStorage.GetStorage().MountPath, + DeletePolicy: deletePolicy, + } + TransferTaskManager.Add(t) + } + return nil +} + +func transferObjPath(t *TransferTask) error { + t.Status = "getting src object" + srcObj, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath) + if err != nil { + return errors.WithMessagef(err, "failed get src [%s] file", t.SrcObjPath) + } + if srcObj.IsDir() { + t.Status = "src object is dir, listing objs" + objs, err := op.List(t.Ctx(), t.SrcStorage, t.SrcObjPath, model.ListArgs{}) + if err != nil { + return errors.WithMessagef(err, "failed list src [%s] objs", t.SrcObjPath) + } + for _, obj := range objs { + if utils.IsCanceled(t.Ctx()) { + return nil + } + srcObjPath := stdpath.Join(t.SrcObjPath, obj.GetName()) + dstObjPath := stdpath.Join(t.DstDirPath, srcObj.GetName()) + TransferTaskManager.Add(&TransferTask{ + 
TaskExtension: task.TaskExtension{ + Creator: t.Creator, + }, + SrcObjPath: srcObjPath, + DstDirPath: dstObjPath, + SrcStorage: t.SrcStorage, + DstStorage: t.DstStorage, + SrcStorageMp: t.SrcStorageMp, + DstStorageMp: t.DstStorageMp, + DeletePolicy: t.DeletePolicy, + }) + } + t.Status = "src object is dir, added all transfer tasks of objs" + return nil + } + return transferObjFile(t) +} + +func transferObjFile(t *TransferTask) error { + srcFile, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath) + if err != nil { + return errors.WithMessagef(err, "failed get src [%s] file", t.SrcObjPath) + } + link, _, err := op.Link(t.Ctx(), t.SrcStorage, t.SrcObjPath, model.LinkArgs{ + Header: http.Header{}, + }) + if err != nil { + return errors.WithMessagef(err, "failed get [%s] link", t.SrcObjPath) + } + fs := stream.FileStream{ + Obj: srcFile, + Ctx: t.Ctx(), + } + // any link provided is seekable + ss, err := stream.NewSeekableStream(fs, link) + if err != nil { + return errors.WithMessagef(err, "failed get [%s] stream", t.SrcObjPath) + } + t.SetTotalBytes(srcFile.GetSize()) + return op.Put(t.Ctx(), t.DstStorage, t.DstDirPath, ss, t.SetProgress) +} + +func removeObjTemp(t *TransferTask) { + srcObj, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath) + if err != nil || srcObj.IsDir() { + return + } + if err := op.Remove(t.Ctx(), t.SrcStorage, t.SrcObjPath); err != nil { + log.Errorf("failed to delete temp obj %s, error: %s", t.SrcObjPath, err.Error()) + } +} diff --git a/internal/offline_download/tool/util.go b/internal/offline_download/tool/util.go deleted file mode 100644 index b2c6ec02..00000000 --- a/internal/offline_download/tool/util.go +++ /dev/null @@ -1,41 +0,0 @@ -package tool - -import ( - "os" - "path/filepath" -) - -func GetFiles(dir string) ([]File, error) { - var files []File - err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - files = append(files, File{ - Name: info.Name(), 
- Size: info.Size(), - Path: path, - Modified: info.ModTime(), - }) - } - return nil - }) - if err != nil { - return nil, err - } - return files, nil -} - -func GetFile(path string) (File, error) { - info, err := os.Stat(path) - if err != nil { - return File{}, err - } - return File{ - Name: info.Name(), - Size: info.Size(), - Path: path, - Modified: info.ModTime(), - }, nil -} diff --git a/internal/offline_download/transmission/client.go b/internal/offline_download/transmission/client.go index 4131f3e1..8049afd6 100644 --- a/internal/offline_download/transmission/client.go +++ b/internal/offline_download/transmission/client.go @@ -29,7 +29,7 @@ func (t *Transmission) Run(task *tool.DownloadTask) error { } func (t *Transmission) Name() string { - return "transmission" + return "Transmission" } func (t *Transmission) Items() []model.SettingItem { diff --git a/server/handles/offline_download.go b/server/handles/offline_download.go index c7b7af76..24ff7a05 100644 --- a/server/handles/offline_download.go +++ b/server/handles/offline_download.go @@ -1,6 +1,9 @@ package handles import ( + _115 "github.com/alist-org/alist/v3/drivers/115" + "github.com/alist-org/alist/v3/drivers/pikpak" + "github.com/alist-org/alist/v3/drivers/thunder" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/offline_download/tool" @@ -73,11 +76,6 @@ func SetQbittorrent(c *gin.Context) { common.SuccessResp(c, "ok") } -func OfflineDownloadTools(c *gin.Context) { - tools := tool.Tools.Names() - common.SuccessResp(c, tools) -} - type SetTransmissionReq struct { Uri string `json:"uri" form:"uri"` Seedtime string `json:"seedtime" form:"seedtime"` @@ -97,7 +95,7 @@ func SetTransmission(c *gin.Context) { common.ErrorResp(c, err, 500) return } - _tool, err := tool.Tools.Get("transmission") + _tool, err := tool.Tools.Get("Transmission") if err != nil { common.ErrorResp(c, err, 500) return @@ -109,6 +107,143 @@ func 
SetTransmission(c *gin.Context) { common.SuccessResp(c, "ok") } +type Set115Req struct { + TempDir string `json:"temp_dir" form:"temp_dir"` +} + +func Set115(c *gin.Context) { + var req Set115Req + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + if req.TempDir != "" { + storage, _, err := op.GetStorageAndActualPath(req.TempDir) + if err != nil { + common.ErrorStrResp(c, "storage does not exists", 400) + return + } + if storage.Config().CheckStatus && storage.GetStorage().Status != op.WORK { + common.ErrorStrResp(c, "storage not init: "+storage.GetStorage().Status, 400) + return + } + if _, ok := storage.(*_115.Pan115); !ok { + common.ErrorStrResp(c, "unsupported storage driver for offline download, only 115 Cloud is supported", 400) + return + } + } + items := []model.SettingItem{ + {Key: conf.Pan115TempDir, Value: req.TempDir, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE}, + } + if err := op.SaveSettingItems(items); err != nil { + common.ErrorResp(c, err, 500) + return + } + _tool, err := tool.Tools.Get("115 Cloud") + if err != nil { + common.ErrorResp(c, err, 500) + return + } + if _, err := _tool.Init(); err != nil { + common.ErrorResp(c, err, 500) + return + } + common.SuccessResp(c, "ok") +} + +type SetPikPakReq struct { + TempDir string `json:"temp_dir" form:"temp_dir"` +} + +func SetPikPak(c *gin.Context) { + var req SetPikPakReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + if req.TempDir != "" { + storage, _, err := op.GetStorageAndActualPath(req.TempDir) + if err != nil { + common.ErrorStrResp(c, "storage does not exists", 400) + return + } + if storage.Config().CheckStatus && storage.GetStorage().Status != op.WORK { + common.ErrorStrResp(c, "storage not init: "+storage.GetStorage().Status, 400) + return + } + if _, ok := storage.(*pikpak.PikPak); !ok { + common.ErrorStrResp(c, "unsupported storage driver for offline download, only PikPak 
is supported", 400) + return + } + } + items := []model.SettingItem{ + {Key: conf.PikPakTempDir, Value: req.TempDir, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE}, + } + if err := op.SaveSettingItems(items); err != nil { + common.ErrorResp(c, err, 500) + return + } + _tool, err := tool.Tools.Get("PikPak") + if err != nil { + common.ErrorResp(c, err, 500) + return + } + if _, err := _tool.Init(); err != nil { + common.ErrorResp(c, err, 500) + return + } + common.SuccessResp(c, "ok") +} + +type SetThunderReq struct { + TempDir string `json:"temp_dir" form:"temp_dir"` +} + +func SetThunder(c *gin.Context) { + var req SetThunderReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + if req.TempDir != "" { + storage, _, err := op.GetStorageAndActualPath(req.TempDir) + if err != nil { + common.ErrorStrResp(c, "storage does not exists", 400) + return + } + if storage.Config().CheckStatus && storage.GetStorage().Status != op.WORK { + common.ErrorStrResp(c, "storage not init: "+storage.GetStorage().Status, 400) + return + } + if _, ok := storage.(*thunder.Thunder); !ok { + common.ErrorStrResp(c, "unsupported storage driver for offline download, only Thunder is supported", 400) + return + } + } + items := []model.SettingItem{ + {Key: conf.ThunderTempDir, Value: req.TempDir, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE}, + } + if err := op.SaveSettingItems(items); err != nil { + common.ErrorResp(c, err, 500) + return + } + _tool, err := tool.Tools.Get("Thunder") + if err != nil { + common.ErrorResp(c, err, 500) + return + } + if _, err := _tool.Init(); err != nil { + common.ErrorResp(c, err, 500) + return + } + common.SuccessResp(c, "ok") +} + +func OfflineDownloadTools(c *gin.Context) { + tools := tool.Tools.Names() + common.SuccessResp(c, tools) +} + type AddOfflineDownloadReq struct { Urls []string `json:"urls"` Path string `json:"path"` diff --git a/server/router.go 
b/server/router.go index 9ff50365..184de51e 100644 --- a/server/router.go +++ b/server/router.go @@ -132,6 +132,9 @@ func admin(g *gin.RouterGroup) { setting.POST("/set_aria2", handles.SetAria2) setting.POST("/set_qbit", handles.SetQbittorrent) setting.POST("/set_transmission", handles.SetTransmission) + setting.POST("/set_115", handles.Set115) + setting.POST("/set_pikpak", handles.SetPikPak) + setting.POST("/set_thunder", handles.SetThunder) // retain /admin/task API to ensure compatibility with legacy automation scripts _task(g.Group("/task")) From 880cc7abca72b86877efad275a845de9f8a2a1d0 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sat, 18 Jan 2025 23:24:09 +0800 Subject: [PATCH 082/187] fix(139): use `personal_new` by default (#7836) --- drivers/139/meta.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/139/meta.go b/drivers/139/meta.go index 680e469d..d80b8566 100644 --- a/drivers/139/meta.go +++ b/drivers/139/meta.go @@ -9,7 +9,7 @@ type Addition struct { //Account string `json:"account" required:"true"` Authorization string `json:"authorization" type:"text" required:"true"` driver.RootID - Type string `json:"type" type:"select" options:"personal,family,personal_new" default:"personal"` + Type string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"` CloudID string `json:"cloud_id"` CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"` } From ab22cf823345f4f6d5473f9aa6fa31cefcefe1b0 Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Sat, 18 Jan 2025 23:26:58 +0800 Subject: [PATCH 083/187] feat: add `Reference` interface to driver (#7805) * feat: add `Reference` interface to driver * feat(123_share): support reference 123pan --- drivers/123/driver.go | 23 +++--- drivers/123/upload.go | 6 +- drivers/123/util.go | 11 +-- drivers/123_share/driver.go | 15 +++- drivers/123_share/util.go | 3 + 
drivers/139/driver.go | 109 ++++++++++++++++------------- drivers/139/util.go | 26 +++++-- drivers/189pc/driver.go | 39 +++++++---- drivers/189pc/utils.go | 37 +++++++--- drivers/aliyundrive_open/driver.go | 12 +++- drivers/aliyundrive_open/meta.go | 5 +- drivers/aliyundrive_open/upload.go | 2 +- drivers/aliyundrive_open/util.go | 18 +++-- internal/driver/driver.go | 4 ++ internal/op/storage.go | 24 +++++++ 15 files changed, 230 insertions(+), 104 deletions(-) diff --git a/drivers/123/driver.go b/drivers/123/driver.go index 3620431d..3828a59d 100644 --- a/drivers/123/driver.go +++ b/drivers/123/driver.go @@ -6,13 +6,14 @@ import ( "encoding/base64" "encoding/hex" "fmt" - "golang.org/x/time/rate" "io" "net/http" "net/url" "sync" "time" + "golang.org/x/time/rate" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -41,12 +42,12 @@ func (d *Pan123) GetAddition() driver.Additional { } func (d *Pan123) Init(ctx context.Context) error { - _, err := d.request(UserInfo, http.MethodGet, nil, nil) + _, err := d.Request(UserInfo, http.MethodGet, nil, nil) return err } func (d *Pan123) Drop(ctx context.Context) error { - _, _ = d.request(Logout, http.MethodPost, func(req *resty.Request) { + _, _ = d.Request(Logout, http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{}) }, nil) return nil @@ -81,8 +82,8 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) "size": f.Size, "type": f.Type, } - resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) { - + resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) { + req.SetBody(data).SetHeaders(headers) }, nil) if err != nil { @@ -135,7 +136,7 @@ func (d *Pan123) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin "size": 0, "type": 1, } - _, err := d.request(Mkdir, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(Mkdir, 
http.MethodPost, func(req *resty.Request) { req.SetBody(data) }, nil) return err @@ -146,7 +147,7 @@ func (d *Pan123) Move(ctx context.Context, srcObj, dstDir model.Obj) error { "fileIdList": []base.Json{{"FileId": srcObj.GetID()}}, "parentFileId": dstDir.GetID(), } - _, err := d.request(Move, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(Move, http.MethodPost, func(req *resty.Request) { req.SetBody(data) }, nil) return err @@ -158,7 +159,7 @@ func (d *Pan123) Rename(ctx context.Context, srcObj model.Obj, newName string) e "fileId": srcObj.GetID(), "fileName": newName, } - _, err := d.request(Rename, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(Rename, http.MethodPost, func(req *resty.Request) { req.SetBody(data) }, nil) return err @@ -175,7 +176,7 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error { "operation": true, "fileTrashInfoList": []File{f}, } - _, err := d.request(Trash, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(Trash, http.MethodPost, func(req *resty.Request) { req.SetBody(data) }, nil) return err @@ -213,7 +214,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr "type": 0, } var resp UploadResp - res, err := d.request(UploadRequest, http.MethodPost, func(req *resty.Request) { + res, err := d.Request(UploadRequest, http.MethodPost, func(req *resty.Request) { req.SetBody(data).SetContext(ctx) }, &resp) if err != nil { @@ -248,7 +249,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr } _, err = uploader.UploadWithContext(ctx, input) } - _, err = d.request(UploadComplete, http.MethodPost, func(req *resty.Request) { + _, err = d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "fileId": resp.Data.FileId, }).SetContext(ctx) diff --git a/drivers/123/upload.go b/drivers/123/upload.go index 6f6221f1..66627b4c 100644 --- a/drivers/123/upload.go +++ b/drivers/123/upload.go 
@@ -25,7 +25,7 @@ func (d *Pan123) getS3PreSignedUrls(ctx context.Context, upReq *UploadResp, star "StorageNode": upReq.Data.StorageNode, } var s3PreSignedUrls S3PreSignedURLs - _, err := d.request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) { req.SetBody(data).SetContext(ctx) }, &s3PreSignedUrls) if err != nil { @@ -44,7 +44,7 @@ func (d *Pan123) getS3Auth(ctx context.Context, upReq *UploadResp, start, end in "uploadId": upReq.Data.UploadId, } var s3PreSignedUrls S3PreSignedURLs - _, err := d.request(S3Auth, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(S3Auth, http.MethodPost, func(req *resty.Request) { req.SetBody(data).SetContext(ctx) }, &s3PreSignedUrls) if err != nil { @@ -63,7 +63,7 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F "key": upReq.Data.Key, "uploadId": upReq.Data.UploadId, } - _, err := d.request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) { req.SetBody(data).SetContext(ctx) }, nil) return err diff --git a/drivers/123/util.go b/drivers/123/util.go index 6365b1c9..7e5a2397 100644 --- a/drivers/123/util.go +++ b/drivers/123/util.go @@ -194,7 +194,9 @@ func (d *Pan123) login() error { // return &authKey, nil //} -func (d *Pan123) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { +func (d *Pan123) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { + isRetry := false +do: req := base.RestyClient.R() req.SetHeaders(map[string]string{ "origin": "https://www.123pan.com", @@ -223,12 +225,13 @@ func (d *Pan123) request(url string, method string, callback base.ReqCallback, r body := res.Body() code := utils.Json.Get(body, "code").ToInt() if code != 0 { - if code == 401 { + if !isRetry && code == 401 { err := 
d.login() if err != nil { return nil, err } - return d.request(url, method, callback, resp) + isRetry = true + goto do } return nil, errors.New(jsoniter.Get(body, "message").ToString()) } @@ -260,7 +263,7 @@ func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([] "operateType": "4", "inDirectSpace": "false", } - _res, err := d.request(FileList, http.MethodGet, func(req *resty.Request) { + _res, err := d.Request(FileList, http.MethodGet, func(req *resty.Request) { req.SetQueryParams(query) }, &resp) if err != nil { diff --git a/drivers/123_share/driver.go b/drivers/123_share/driver.go index 9c1f3803..640fb749 100644 --- a/drivers/123_share/driver.go +++ b/drivers/123_share/driver.go @@ -4,12 +4,14 @@ import ( "context" "encoding/base64" "fmt" - "golang.org/x/time/rate" "net/http" "net/url" "sync" "time" + "golang.org/x/time/rate" + + _123 "github.com/alist-org/alist/v3/drivers/123" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -23,6 +25,7 @@ type Pan123Share struct { model.Storage Addition apiRateLimit sync.Map + ref *_123.Pan123 } func (d *Pan123Share) Config() driver.Config { @@ -39,7 +42,17 @@ func (d *Pan123Share) Init(ctx context.Context) error { return nil } +func (d *Pan123Share) InitReference(storage driver.Driver) error { + refStorage, ok := storage.(*_123.Pan123) + if ok { + d.ref = refStorage + return nil + } + return fmt.Errorf("ref: storage is not 123Pan") +} + func (d *Pan123Share) Drop(ctx context.Context) error { + d.ref = nil return nil } diff --git a/drivers/123_share/util.go b/drivers/123_share/util.go index 80ea8f0c..c2140bf6 100644 --- a/drivers/123_share/util.go +++ b/drivers/123_share/util.go @@ -53,6 +53,9 @@ func GetApi(rawUrl string) string { } func (d *Pan123Share) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { + if d.ref != nil { + return d.ref.Request(url, method, 
callback, resp) + } req := base.RestyClient.R() req.SetHeaders(map[string]string{ "origin": "https://www.123pan.com", diff --git a/drivers/139/driver.go b/drivers/139/driver.go index dd154efe..ebb30e25 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -26,6 +26,7 @@ type Yun139 struct { Addition cron *cron.Cron Account string + ref *Yun139 } func (d *Yun139) Config() driver.Config { @@ -37,61 +38,73 @@ func (d *Yun139) GetAddition() driver.Additional { } func (d *Yun139) Init(ctx context.Context) error { - if d.Authorization == "" { - return fmt.Errorf("authorization is empty") - } - d.cron = cron.NewCron(time.Hour * 24 * 7) - d.cron.Do(func() { - err := d.refreshToken() - if err != nil { - log.Errorf("%+v", err) + if d.ref == nil { + if d.Authorization == "" { + return fmt.Errorf("authorization is empty") } - }) + d.cron = cron.NewCron(time.Hour * 24 * 7) + d.cron.Do(func() { + err := d.refreshToken() + if err != nil { + log.Errorf("%+v", err) + } + }) + } switch d.Addition.Type { case MetaPersonalNew: if len(d.Addition.RootFolderID) == 0 { d.RootFolderID = "/" } - return nil case MetaPersonal: if len(d.Addition.RootFolderID) == 0 { d.RootFolderID = "root" } - fallthrough case MetaGroup: if len(d.Addition.RootFolderID) == 0 { d.RootFolderID = d.CloudID } - fallthrough case MetaFamily: - decode, err := base64.StdEncoding.DecodeString(d.Authorization) - if err != nil { - return err - } - decodeStr := string(decode) - splits := strings.Split(decodeStr, ":") - if len(splits) < 2 { - return fmt.Errorf("authorization is invalid, splits < 2") - } - d.Account = splits[1] - _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{ - "qryUserExternInfoReq": base.Json{ - "commonAccountInfo": base.Json{ - "account": d.Account, - "accountType": 1, - }, - }, - }, nil) - return err default: return errs.NotImplement } + if d.ref != nil { + return nil + } + decode, err := base64.StdEncoding.DecodeString(d.Authorization) + if err != nil 
{ + return err + } + decodeStr := string(decode) + splits := strings.Split(decodeStr, ":") + if len(splits) < 2 { + return fmt.Errorf("authorization is invalid, splits < 2") + } + d.Account = splits[1] + _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{ + "qryUserExternInfoReq": base.Json{ + "commonAccountInfo": base.Json{ + "account": d.getAccount(), + "accountType": 1, + }, + }, + }, nil) + return err +} + +func (d *Yun139) InitReference(storage driver.Driver) error { + refStorage, ok := storage.(*Yun139) + if ok { + d.ref = refStorage + return nil + } + return errs.NotSupport } func (d *Yun139) Drop(ctx context.Context) error { if d.cron != nil { d.cron.Stop() } + d.ref = nil return nil } @@ -150,7 +163,7 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin "parentCatalogID": parentDir.GetID(), "newCatalogName": dirName, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, }, @@ -161,7 +174,7 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin data := base.Json{ "cloudID": d.CloudID, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, "docLibName": dirName, @@ -173,7 +186,7 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin data := base.Json{ "catalogName": dirName, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, "groupID": d.CloudID, @@ -219,7 +232,7 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, "contentList": contentList, "catalogList": catalogList, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -247,7 +260,7 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, "newCatalogID": dstDir.GetID(), }, "commonAccountInfo": base.Json{ - 
"account": d.Account, + "account": d.getAccount(), "accountType": 1, }, }, @@ -282,7 +295,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e "catalogID": srcObj.GetID(), "catalogName": newName, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -292,7 +305,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e "contentID": srcObj.GetID(), "contentName": newName, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -309,7 +322,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e "modifyCatalogName": newName, "path": srcObj.GetPath(), "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -321,7 +334,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e "contentName": newName, "path": srcObj.GetPath(), "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -338,7 +351,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e // "catalogID": srcObj.GetID(), // "catalogName": newName, // "commonAccountInfo": base.Json{ - // "account": d.Account, + // "account": d.getAccount(), // "accountType": 1, // }, // "path": srcObj.GetPath(), @@ -350,7 +363,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e "contentID": srcObj.GetID(), "contentName": newName, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, "path": srcObj.GetPath(), @@ -393,7 +406,7 @@ func (d *Yun139) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { "newCatalogID": dstDir.GetID(), }, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, }, @@ -430,7 +443,7 @@ func (d *Yun139) 
Remove(ctx context.Context, obj model.Obj) error { "contentList": contentList, "catalogList": catalogList, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -457,7 +470,7 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error { "catalogInfoList": catalogInfoList, }, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, }, @@ -468,7 +481,7 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error { "catalogList": catalogInfoList, "contentList": contentInfoList, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, "sourceCloudID": d.CloudID, @@ -598,7 +611,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr "uploadId": resp.Data.UploadId, "partInfos": batchPartInfos, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -735,7 +748,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr "parentCatalogID": dstDir.GetID(), "newCatalogName": "", "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } diff --git a/drivers/139/util.go b/drivers/139/util.go index d0b4d3b4..2dade250 100644 --- a/drivers/139/util.go +++ b/drivers/139/util.go @@ -54,6 +54,9 @@ func getTime(t string) time.Time { } func (d *Yun139) refreshToken() error { + if d.ref == nil { + return d.ref.refreshToken() + } url := "https://aas.caiyun.feixin.10086.cn:443/tellin/authTokenRefresh.do" var resp RefreshTokenResp decode, err := base64.StdEncoding.DecodeString(d.Authorization) @@ -99,7 +102,7 @@ func (d *Yun139) request(pathname string, method string, callback base.ReqCallba req.SetHeaders(map[string]string{ "Accept": "application/json, text/plain, */*", "CMS-DEVICE": "default", - "Authorization": "Basic " + d.Authorization, + "Authorization": 
"Basic " + d.getAuthorization(), "mcloud-channel": "1000101", "mcloud-client": "10701", //"mcloud-route": "001", @@ -151,7 +154,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) { "catalogSortType": 0, "contentSortType": 0, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -199,7 +202,7 @@ func (d *Yun139) newJson(data map[string]interface{}) base.Json { "cloudID": d.CloudID, "cloudType": 1, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -320,7 +323,7 @@ func (d *Yun139) getLink(contentId string) (string, error) { "appName": "", "contentID": contentId, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -383,7 +386,7 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R } req.SetHeaders(map[string]string{ "Accept": "application/json, text/plain, */*", - "Authorization": "Basic " + d.Authorization, + "Authorization": "Basic " + d.getAuthorization(), "Caller": "web", "Cms-Device": "default", "Mcloud-Channel": "1000101", @@ -514,3 +517,16 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) { return jsoniter.Get(res, "data", "url").ToString(), nil } } + +func (d *Yun139) getAuthorization() string { + if d.ref != nil { + return d.ref.getAuthorization() + } + return d.Authorization +} +func (d *Yun139) getAccount() string { + if d.ref != nil { + return d.ref.getAccount() + } + return d.Account +} diff --git a/drivers/189pc/driver.go b/drivers/189pc/driver.go index 9c01a50f..6b502de0 100644 --- a/drivers/189pc/driver.go +++ b/drivers/189pc/driver.go @@ -33,6 +33,7 @@ type Cloud189PC struct { cleanFamilyTransferFile func() storageConfig driver.Config + ref *Cloud189PC } func (y *Cloud189PC) Config() driver.Config { @@ -64,20 +65,22 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) { y.uploadThread, y.UploadThread = 
3, "3" } - // 初始化请求客户端 - if y.client == nil { - y.client = base.NewRestyClient().SetHeaders(map[string]string{ - "Accept": "application/json;charset=UTF-8", - "Referer": WEB_URL, - }) - } + if y.ref == nil { + // 初始化请求客户端 + if y.client == nil { + y.client = base.NewRestyClient().SetHeaders(map[string]string{ + "Accept": "application/json;charset=UTF-8", + "Referer": WEB_URL, + }) + } - // 避免重复登陆 - identity := utils.GetMD5EncodeStr(y.Username + y.Password) - if !y.isLogin() || y.identity != identity { - y.identity = identity - if err = y.login(); err != nil { - return + // 避免重复登陆 + identity := utils.GetMD5EncodeStr(y.Username + y.Password) + if !y.isLogin() || y.identity != identity { + y.identity = identity + if err = y.login(); err != nil { + return + } } } @@ -103,7 +106,17 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) { return } +func (d *Cloud189PC) InitReference(storage driver.Driver) error { + refStorage, ok := storage.(*Cloud189PC) + if ok { + d.ref = refStorage + return nil + } + return errs.NotSupport +} + func (y *Cloud189PC) Drop(ctx context.Context) error { + y.ref = nil return nil } diff --git a/drivers/189pc/utils.go b/drivers/189pc/utils.go index f5a44455..0c3e5404 100644 --- a/drivers/189pc/utils.go +++ b/drivers/189pc/utils.go @@ -57,11 +57,11 @@ const ( func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string { dateOfGmt := getHttpDateStr() - sessionKey := y.tokenInfo.SessionKey - sessionSecret := y.tokenInfo.SessionSecret + sessionKey := y.getTokenInfo().SessionKey + sessionSecret := y.getTokenInfo().SessionSecret if isFamily { - sessionKey = y.tokenInfo.FamilySessionKey - sessionSecret = y.tokenInfo.FamilySessionSecret + sessionKey = y.getTokenInfo().FamilySessionKey + sessionSecret = y.getTokenInfo().FamilySessionSecret } header := map[string]string{ @@ -74,9 +74,9 @@ func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) } func (y *Cloud189PC) 
EncryptParams(params Params, isFamily bool) string { - sessionSecret := y.tokenInfo.SessionSecret + sessionSecret := y.getTokenInfo().SessionSecret if isFamily { - sessionSecret = y.tokenInfo.FamilySessionSecret + sessionSecret = y.getTokenInfo().FamilySessionSecret } if params != nil { return AesECBEncrypt(params.Encode(), sessionSecret[:16]) @@ -85,7 +85,7 @@ func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string { } func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}, isFamily ...bool) ([]byte, error) { - req := y.client.R().SetQueryParams(clientSuffix()) + req := y.getClient().R().SetQueryParams(clientSuffix()) // 设置params paramsData := y.EncryptParams(params, isBool(isFamily...)) @@ -403,6 +403,9 @@ func (y *Cloud189PC) initLoginParam() error { // 刷新会话 func (y *Cloud189PC) refreshSession() (err error) { + if y.ref != nil { + return y.ref.refreshSession() + } var erron RespErr var userSessionResp UserSessionResp _, err = y.client.R(). 
@@ -620,7 +623,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode } // 尝试恢复进度 - uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.tokenInfo.SessionKey, fileMd5Hex) + uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.getTokenInfo().SessionKey, fileMd5Hex) if !ok { //step.2 预上传 params := Params{ @@ -687,7 +690,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode if err = threadG.Wait(); err != nil { if errors.Is(err, context.Canceled) { uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" }) - base.SaveUploadProgress(y, uploadProgress, y.tokenInfo.SessionKey, fileMd5Hex) + base.SaveUploadProgress(y, uploadProgress, y.getTokenInfo().SessionKey, fileMd5Hex) } return nil, err } @@ -1008,7 +1011,7 @@ func (y *Cloud189PC) getFamilyID() (string, error) { return "", fmt.Errorf("cannot get automatically,please input family_id") } for _, info := range infos { - if strings.Contains(y.tokenInfo.LoginName, info.RemarkName) { + if strings.Contains(y.getTokenInfo().LoginName, info.RemarkName) { return fmt.Sprint(info.FamilyID), nil } } @@ -1142,3 +1145,17 @@ func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration) time.Sleep(t) } } + +func (y *Cloud189PC) getTokenInfo() *AppSessionResp { + if y.ref != nil { + return y.ref.getTokenInfo() + } + return y.tokenInfo +} + +func (y *Cloud189PC) getClient() *resty.Client { + if y.ref != nil { + return y.ref.getClient() + } + return y.client +} diff --git a/drivers/aliyundrive_open/driver.go b/drivers/aliyundrive_open/driver.go index 4029ad57..a65ba05c 100644 --- a/drivers/aliyundrive_open/driver.go +++ b/drivers/aliyundrive_open/driver.go @@ -19,12 +19,12 @@ import ( type AliyundriveOpen struct { model.Storage Addition - base string DriveId string limitList func(ctx context.Context, data base.Json) (*Files, error) limitLink func(ctx context.Context, file 
model.Obj) (*model.Link, error) + ref *AliyundriveOpen } func (d *AliyundriveOpen) Config() driver.Config { @@ -58,7 +58,17 @@ func (d *AliyundriveOpen) Init(ctx context.Context) error { return nil } +func (d *AliyundriveOpen) InitReference(storage driver.Driver) error { + refStorage, ok := storage.(*AliyundriveOpen) + if ok { + d.ref = refStorage + return nil + } + return errs.NotSupport +} + func (d *AliyundriveOpen) Drop(ctx context.Context) error { + d.ref = nil return nil } diff --git a/drivers/aliyundrive_open/meta.go b/drivers/aliyundrive_open/meta.go index 58013143..03f97f8b 100644 --- a/drivers/aliyundrive_open/meta.go +++ b/drivers/aliyundrive_open/meta.go @@ -32,11 +32,10 @@ var config = driver.Config{ DefaultRoot: "root", NoOverwriteUpload: true, } +var API_URL = "https://openapi.alipan.com" func init() { op.RegisterDriver(func() driver.Driver { - return &AliyundriveOpen{ - base: "https://openapi.alipan.com", - } + return &AliyundriveOpen{} }) } diff --git a/drivers/aliyundrive_open/upload.go b/drivers/aliyundrive_open/upload.go index d152836c..653a2442 100644 --- a/drivers/aliyundrive_open/upload.go +++ b/drivers/aliyundrive_open/upload.go @@ -126,7 +126,7 @@ func getProofRange(input string, size int64) (*ProofRange, error) { } func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) { - proofRange, err := getProofRange(d.AccessToken, stream.GetSize()) + proofRange, err := getProofRange(d.getAccessToken(), stream.GetSize()) if err != nil { return "", err } diff --git a/drivers/aliyundrive_open/util.go b/drivers/aliyundrive_open/util.go index 331e6400..659d7da7 100644 --- a/drivers/aliyundrive_open/util.go +++ b/drivers/aliyundrive_open/util.go @@ -19,7 +19,7 @@ import ( // do others that not defined in Driver interface func (d *AliyundriveOpen) _refreshToken() (string, string, error) { - url := d.base + "/oauth/access_token" + url := API_URL + "/oauth/access_token" if d.OauthTokenURL != "" && d.ClientID == "" { url = 
d.OauthTokenURL } @@ -74,6 +74,9 @@ func getSub(token string) (string, error) { } func (d *AliyundriveOpen) refreshToken() error { + if d.ref != nil { + return d.ref.refreshToken() + } refresh, access, err := d._refreshToken() for i := 0; i < 3; i++ { if err == nil { @@ -100,7 +103,7 @@ func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) { req := base.RestyClient.R() // TODO check whether access_token is expired - req.SetHeader("Authorization", "Bearer "+d.AccessToken) + req.SetHeader("Authorization", "Bearer "+d.getAccessToken()) if method == http.MethodPost { req.SetHeader("Content-Type", "application/json") } @@ -109,7 +112,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base } var e ErrResp req.SetError(&e) - res, err := req.Execute(method, d.base+uri) + res, err := req.Execute(method, API_URL+uri) if err != nil { if res != nil { log.Errorf("[aliyundrive_open] request error: %s", res.String()) @@ -118,7 +121,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base } isRetry := len(retry) > 0 && retry[0] if e.Code != "" { - if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.AccessToken == "") { + if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") { err = d.refreshToken() if err != nil { return nil, err, nil @@ -176,3 +179,10 @@ func getNowTime() (time.Time, string) { nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z") return nowTime, nowTimeStr } + +func (d *AliyundriveOpen) getAccessToken() string { + if d.ref != nil { + return d.ref.getAccessToken() + } + return d.AccessToken +} diff --git a/internal/driver/driver.go b/internal/driver/driver.go index 6fd5e8d6..4571110a 100644 --- 
a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -144,3 +144,7 @@ func NewProgress(total int64, up UpdateProgress) *Progress { up: up, } } + +type Reference interface { + InitReference(storage Driver) error +} diff --git a/internal/op/storage.go b/internal/op/storage.go index 7d8831f5..f957f95b 100644 --- a/internal/op/storage.go +++ b/internal/op/storage.go @@ -10,6 +10,7 @@ import ( "github.com/alist-org/alist/v3/internal/db" "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/generic_sync" "github.com/alist-org/alist/v3/pkg/utils" @@ -106,6 +107,29 @@ func initStorage(ctx context.Context, storage model.Storage, storageDriver drive }() // Unmarshal Addition err = utils.Json.UnmarshalFromString(driverStorage.Addition, storageDriver.GetAddition()) + if err == nil { + if ref, ok := storageDriver.(driver.Reference); ok { + if strings.HasPrefix(driverStorage.Remark, "ref:/") { + refMountPath := driverStorage.Remark + i := strings.Index(refMountPath, "\n") + if i > 0 { + refMountPath = refMountPath[4:i] + } else { + refMountPath = refMountPath[4:] + } + var refStorage driver.Driver + refStorage, err = GetStorageByMountPath(refMountPath) + if err != nil { + err = fmt.Errorf("ref: %w", err) + } else { + err = ref.InitReference(refStorage) + if err != nil && errs.IsNotSupportError(err) { + err = fmt.Errorf("ref: storage is not %s", storageDriver.Config().Name) + } + } + } + } + } if err == nil { err = storageDriver.Init(ctx) } From bb40e2e2cdd01b34ba6edfe09448d80e210af177 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Sat, 18 Jan 2025 23:28:12 +0800 Subject: [PATCH 084/187] feat(archive): archive manage (#7817) * feat(archive): archive management * fix(ftp-server): remove duplicate ReadAtSeeker realization * fix(archive): bad seeking of SeekableStream * fix(archive): split internal and driver extraction api * 
feat(archive): patch * fix(shutdown): clear decompress upload tasks * chore * feat(archive): support .iso format * chore --- cmd/kill.go | 54 +++ cmd/root.go | 1 + cmd/server.go | 2 + cmd/{stop.go => stop_default.go} | 12 +- cmd/stop_windows.go | 34 ++ go.mod | 33 +- go.sum | 244 +++++++++- internal/archive/all.go | 7 + internal/archive/archives/archives.go | 126 ++++++ internal/archive/archives/utils.go | 80 ++++ internal/archive/iso9660/iso9660.go | 96 ++++ internal/archive/iso9660/utils.go | 100 +++++ internal/archive/tool/base.go | 15 + internal/archive/tool/utils.go | 23 + internal/archive/zip/utils.go | 156 +++++++ internal/archive/zip/zip.go | 174 +++++++ internal/bootstrap/data/user.go | 15 +- .../patch/v3_41_0/grant_permission.go | 18 +- internal/bootstrap/task.go | 2 + internal/conf/config.go | 19 +- internal/driver/driver.go | 38 +- internal/errs/errors.go | 4 + internal/fs/archive.go | 395 ++++++++++++++++ internal/fs/fs.go | 41 ++ internal/model/archive.go | 49 ++ internal/model/args.go | 27 ++ internal/model/obj.go | 3 + internal/model/user.go | 10 + internal/op/archive.go | 424 ++++++++++++++++++ internal/stream/stream.go | 218 +++++++++ internal/task/manager.go | 20 + server/ftp/fsread.go | 49 +- server/handles/archive.go | 381 ++++++++++++++++ server/handles/down.go | 92 ++-- server/handles/task.go | 8 +- server/router.go | 11 + 36 files changed, 2854 insertions(+), 127 deletions(-) create mode 100644 cmd/kill.go rename cmd/{stop.go => stop_default.go} (87%) create mode 100644 cmd/stop_windows.go create mode 100644 internal/archive/all.go create mode 100644 internal/archive/archives/archives.go create mode 100644 internal/archive/archives/utils.go create mode 100644 internal/archive/iso9660/iso9660.go create mode 100644 internal/archive/iso9660/utils.go create mode 100644 internal/archive/tool/base.go create mode 100644 internal/archive/tool/utils.go create mode 100644 internal/archive/zip/utils.go create mode 100644 internal/archive/zip/zip.go 
create mode 100644 internal/fs/archive.go create mode 100644 internal/model/archive.go create mode 100644 internal/op/archive.go create mode 100644 internal/task/manager.go create mode 100644 server/handles/archive.go diff --git a/cmd/kill.go b/cmd/kill.go new file mode 100644 index 00000000..3378fd70 --- /dev/null +++ b/cmd/kill.go @@ -0,0 +1,54 @@ +package cmd + +import ( + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "os" +) + +// KillCmd represents the kill command +var KillCmd = &cobra.Command{ + Use: "kill", + Short: "Force kill alist server process by daemon/pid file", + Run: func(cmd *cobra.Command, args []string) { + kill() + }, +} + +func kill() { + initDaemon() + if pid == -1 { + log.Info("Seems not have been started. Try use `alist start` to start server.") + return + } + process, err := os.FindProcess(pid) + if err != nil { + log.Errorf("failed to find process by pid: %d, reason: %v", pid, process) + return + } + err = process.Kill() + if err != nil { + log.Errorf("failed to kill process %d: %v", pid, err) + } else { + log.Info("killed process: ", pid) + } + err = os.Remove(pidFile) + if err != nil { + log.Errorf("failed to remove pid file") + } + pid = -1 +} + +func init() { + RootCmd.AddCommand(KillCmd) + + // Here you will define your flags and configuration settings. 
+ + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // stopCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // stopCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} diff --git a/cmd/root.go b/cmd/root.go index 6bd82b7a..59eb989c 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -6,6 +6,7 @@ import ( "github.com/alist-org/alist/v3/cmd/flags" _ "github.com/alist-org/alist/v3/drivers" + _ "github.com/alist-org/alist/v3/internal/archive" _ "github.com/alist-org/alist/v3/internal/offline_download" "github.com/spf13/cobra" ) diff --git a/cmd/server.go b/cmd/server.go index 3112a6a9..d9206cfe 100644 --- a/cmd/server.go +++ b/cmd/server.go @@ -6,6 +6,7 @@ import ( "fmt" ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" "github.com/KirCute/sftpd-alist" + "github.com/alist-org/alist/v3/internal/fs" "net" "net/http" "os" @@ -159,6 +160,7 @@ the address is defined in config file`, signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) <-quit utils.Log.Println("Shutdown server...") + fs.ArchiveContentUploadTaskManager.RemoveAll() Release() ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() diff --git a/cmd/stop.go b/cmd/stop_default.go similarity index 87% rename from cmd/stop.go rename to cmd/stop_default.go index 09fba7b7..8f133940 100644 --- a/cmd/stop.go +++ b/cmd/stop_default.go @@ -1,10 +1,10 @@ -/* -Copyright © 2022 NAME HERE -*/ +//go:build !windows + package cmd import ( "os" + "syscall" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -30,11 +30,11 @@ func stop() { log.Errorf("failed to find process by pid: %d, reason: %v", pid, process) return } - err = process.Kill() + err = process.Signal(syscall.SIGTERM) if err != nil { - log.Errorf("failed to kill process %d: %v", pid, err) + log.Errorf("failed to terminate process %d: %v", pid, 
err) } else { - log.Info("killed process: ", pid) + log.Info("terminated process: ", pid) } err = os.Remove(pidFile) if err != nil { diff --git a/cmd/stop_windows.go b/cmd/stop_windows.go new file mode 100644 index 00000000..e086eab1 --- /dev/null +++ b/cmd/stop_windows.go @@ -0,0 +1,34 @@ +//go:build windows + +package cmd + +import ( + "github.com/spf13/cobra" +) + +// StopCmd represents the stop command +var StopCmd = &cobra.Command{ + Use: "stop", + Short: "Same as the kill command", + Run: func(cmd *cobra.Command, args []string) { + stop() + }, +} + +func stop() { + kill() +} + +func init() { + RootCmd.AddCommand(StopCmd) + + // Here you will define your flags and configuration settings. + + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // stopCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // stopCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} diff --git a/go.mod b/go.mod index 7ca66e15..0693dcd3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/alist-org/alist/v3 -go 1.22.4 +go 1.23 + +toolchain go1.23.1 require ( github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 @@ -40,17 +42,20 @@ require ( github.com/ipfs/go-ipfs-api v0.7.0 github.com/jlaffaye/ftp v0.2.0 github.com/json-iterator/go v1.1.12 + github.com/kdomanski/iso9660 v0.4.0 github.com/larksuite/oapi-sdk-go/v3 v3.3.1 github.com/maruel/natural v1.1.1 github.com/meilisearch/meilisearch-go v0.27.2 + github.com/mholt/archives v0.1.0 github.com/minio/sio v0.4.0 github.com/natefinch/lumberjack v2.0.0+incompatible github.com/ncw/swift/v2 v2.0.3 - github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831 + github.com/orzogc/fake115uploader v0.6.2 github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/pquerna/otp v1.4.0 github.com/rclone/rclone v1.67.0 + 
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.11.0 github.com/spf13/cobra v1.8.1 @@ -61,6 +66,7 @@ require ( github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 github.com/xhofe/tache v0.1.3 github.com/xhofe/wopan-sdk-go v0.1.3 + github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9 github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22 golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e @@ -77,21 +83,32 @@ require ( ) require ( - github.com/BurntSushi/toml v0.3.1 // indirect + github.com/STARRY-S/zip v0.2.1 // indirect github.com/blevesearch/go-faiss v1.0.20 // indirect github.com/blevesearch/zapx/v16 v16.1.5 // indirect + github.com/bodgit/plumbing v1.3.0 // indirect + github.com/bodgit/sevenzip v1.6.0 // indirect + github.com/bodgit/windows v1.0.1 // indirect github.com/bytedance/sonic/loader v0.1.1 // indirect github.com/charmbracelet/x/ansi v0.2.3 // indirect github.com/charmbracelet/x/term v0.2.0 // indirect github.com/cloudwego/base64x v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect + github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect github.com/fclairamb/go-log v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hekmon/cunits/v2 v2.1.0 // indirect github.com/ipfs/boxo v0.12.0 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect + github.com/sorairolake/lzip-go v0.3.5 // indirect github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect + github.com/therootcompany/xz v1.0.1 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect + go4.org 
v0.0.0-20230225012048-214862532bf5 // indirect ) require ( @@ -99,8 +116,8 @@ require ( github.com/RoaringBitmap/roaring v1.9.3 // indirect github.com/abbot/go-http-auth v0.4.0 // indirect github.com/aead/ecdh v0.2.0 // indirect - github.com/andreburgaud/crypt2go v1.2.0 // indirect - github.com/andybalholm/brotli v1.0.4 // indirect + github.com/andreburgaud/crypt2go v1.8.0 // indirect + github.com/andybalholm/brotli v1.1.1 // indirect github.com/axgle/mahonia v0.0.0-20180208002826-3358181d7394 github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/benbjohnson/clock v1.3.0 // indirect @@ -161,7 +178,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect - github.com/klauspost/compress v1.17.8 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/kr/fs v0.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect @@ -196,7 +213,7 @@ require ( github.com/multiformats/go-varint v0.0.7 // indirect github.com/otiai10/copy v1.14.0 github.com/pelletier/go-toml/v2 v2.2.2 // indirect - github.com/pierrec/lz4/v4 v4.1.18 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect @@ -228,7 +245,7 @@ require ( golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/text v0.21.0 golang.org/x/tools v0.24.0 // indirect google.golang.org/api v0.169.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect diff --git a/go.sum b/go.sum index 101a0bea..9d92a935 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,27 
@@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 h1:ikwCzeqoqN6wvBHOB9OI6dde/jbV7EoTMpUcxtYl5Po= github.com/KirCute/ftpserverlib-pasvportmap v1.25.0/go.mod h1:v0NgMtKDDi/6CM6r4P+daCljCW3eO9yS+Z+pZDTKo1E= github.com/KirCute/sftpd-alist v0.0.12 h1:GNVM5QLbQLAfXP4wGUlXFA2IO6fVek0n0IsGnOuISdg= @@ -12,6 +30,8 @@ github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc= github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM= github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= +github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg= +github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4= github.com/SheltonZhu/115driver v1.0.32 h1:Taw1bnfcPJZW0xTdhDvEbBS1tccif7J7DslRp2NkDyQ= github.com/SheltonZhu/115driver v1.0.32/go.mod h1:XXFi23pyhAgzUE8dUEKdGvIdUQKi3wv6zR7C1Do40D8= github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A= @@ -30,10 +50,11 @@ github.com/alist-org/times v0.0.0-20240721124654-efa0c7d3ad92 h1:pIEI87zhv8ZzQcu github.com/alist-org/times v0.0.0-20240721124654-efa0c7d3ad92/go.mod h1:oPJwGY3sLmGgcJamGumz//0A35f4BwQRacyqLNcJTOU= github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g= github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/andreburgaud/crypt2go v1.2.0 h1:oly/ENAodeqTYpUafgd4r3v+VKLQnmOKUyfpj+TxHbE= -github.com/andreburgaud/crypt2go v1.2.0/go.mod h1:kKRqlrX/3Q9Ki7HdUsoh0cX1Urq14/Hcta4l4VrIXrI= -github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= 
+github.com/andreburgaud/crypt2go v1.8.0 h1:J73vGTb1P6XL69SSuumbKs0DWn3ulbl9L92ZXBjw6pc= +github.com/andreburgaud/crypt2go v1.8.0/go.mod h1:L5nfShQ91W78hOWhUH2tlGRPO+POAPJAF5fKOLB9SXg= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= @@ -91,6 +112,12 @@ github.com/blevesearch/zapx/v16 v16.1.5 h1:b0sMcarqNFxuXvjoXsF8WtwVahnxyhEvBSRJi github.com/blevesearch/zapx/v16 v16.1.5/go.mod h1:J4mSF39w1QELc11EWRSBFkPeZuO7r/NPKkHzDCoiaI8= github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw= github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0= +github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU= +github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs= +github.com/bodgit/sevenzip v1.6.0 h1:a4R0Wu6/P1o1pP/3VV++aEOcyeBxeO/xE2Y9NSTrr6A= +github.com/bodgit/sevenzip v1.6.0/go.mod h1:zOBh9nJUof7tcrlqJFv1koWRrhz3LbDbUNngkuZxLMc= +github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4= +github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= @@ -99,6 +126,7 @@ github.com/bytedance/sonic/loader v0.1.1 
h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3z github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc= github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE= @@ -115,8 +143,12 @@ github.com/charmbracelet/x/term v0.2.0 h1:cNB9Ot9q8I711MyZ7myUR5HFWL/lc3OpU8jZ4h github.com/charmbracelet/x/term v0.2.0/go.mod h1:GVxgxAbjUrmpvIINHIQnJJKpMlHiZ4cktEQCN6GWyF0= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e h1:GLC8iDDcbt1H8+RkNao2nRGjyNTIo81e1rAJT9/uWYA= github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.mod h1:ln9Whp+wVY/FTbn2SK0ag+SKD2fC0yQCF/Lqowc1LmU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= github.com/cloudwego/iasm v0.2.0 
h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= @@ -145,8 +177,13 @@ github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1 github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo= github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4= +github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJLNCEi7YHVMkwwtfSr2k9splgdSM= github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= github.com/fclairamb/go-log v0.5.0 h1:Gz9wSamEaA6lta4IU2cjJc2xSq5sV5VYSB5w/SUHhVc= @@ -175,6 +212,8 @@ github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod 
h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= @@ -220,14 +259,32 @@ github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17w github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo= github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf 
v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -236,6 +293,11 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-tpm v0.9.1 h1:0pGc4X//bAlmZzMKf8iz6IsDo1nYTbYJ6FZN/rg4zdM= github.com/google/go-tpm v0.9.1/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -243,6 +305,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -257,13 +321,18 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= 
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI0= github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M= github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ= github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg= github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI= github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ipfs/boxo v0.12.0 h1:AXHg/1ONZdRQHQLgG5JHsSC3XoE4DjCAMgK+asZvUcQ= @@ -297,18 +366,26 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod 
h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 h1:G+9t9cEtnC9jFiTxyptEKuNIAbiN5ZCQzX2a74lj3xg= github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c= +github.com/kdomanski/iso9660 v0.4.0 h1:BPKKdcINz3m0MdjIMwS0wx1nofsOjxOq8TOr45WGHFg= +github.com/kdomanski/iso9660 v0.4.0/go.mod h1:OxUSupHsO9ceI8lBLPJKWBTphLemjrCQY8LPXM7qSzU= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= 
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -355,6 +432,8 @@ github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/meilisearch/meilisearch-go v0.27.2 h1:3G21dJ5i208shnLPDsIEZ0L0Geg/5oeXABFV7nlK94k= github.com/meilisearch/meilisearch-go v0.27.2/go.mod h1:SxuSqDcPBIykjWz1PX+KzsYzArNLSCadQodWs8extS0= +github.com/mholt/archives v0.1.0 h1:FacgJyrjiuyomTuNA92X5GyRBRZjE43Y/lrzKIlF35Q= +github.com/mholt/archives v0.1.0/go.mod h1:j/Ire/jm42GN7h90F5kzj6hf6ZFzEH66de+hmjEKu+I= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/minio/sio v0.4.0 h1:u4SWVEm5lXSqU42ZWawV0D9I5AZ5YMmo2RXpEQ/kRhc= @@ -400,8 +479,10 @@ github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4 github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg= github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= -github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831 h1:K3T3eu4h5aYIOzUtLjN08L4Qt4WGaJONMgcaD0ayBJQ= -github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831/go.mod h1:lSHD4lC4zlMl+zcoysdJcd5KFzsWwOD8BJbyg1Ws9Ng= +github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY= +github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= +github.com/orzogc/fake115uploader v0.6.2 h1:f4LzqeeXpmY7DjOMnzmAnnPTPMA/f/BUclq4ecffTvU= +github.com/orzogc/fake115uploader v0.6.2/go.mod 
h1:Mqqwv1+gUEjJhUfIQanco3DCTKp+7lSx8DJ3AoRwMoE= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= @@ -410,8 +491,8 @@ github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OI github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= -github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= -github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -432,6 +513,7 @@ github.com/pquerna/otp v1.4.0 h1:wZvl1TIVxKRThZIBiwOOHOGP/1+nZyWBil9Y2XNEDzg= github.com/pquerna/otp v1.4.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= 
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= @@ -445,13 +527,17 @@ github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Ny github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= +github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d h1:hrujxIzL1woJ7AwssoOcM/tq5JjjG2yYOc8odClEiXA= +github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU= github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo= github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM= @@ -469,6 +555,8 @@ github.com/skip2/go-qrcode 
v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1 github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu0Cwg= +github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -501,6 +589,8 @@ github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 h1:Jtcrb09q0AVWe3 github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA= github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 h1:6Y51mutOvRGRx6KqyMNo//xk8B8o6zW9/RVmy1VamOs= github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543/go.mod h1:jpwqYA8KUVEvSUJHkCXsnBRJCSKP1BMa81QZ6kvRpow= +github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= +github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4= github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0= @@ -517,6 +607,9 @@ github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6 github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ugorji/go/codec v1.2.12 
h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/upyun/go-sdk/v3 v3.0.4 h1:2DCJa/Yi7/3ZybT9UCPATSzvU3wpPPxhXinNlb1Hi8Q= github.com/upyun/go-sdk/v3 v3.0.4/go.mod h1:P/SnuuwhrIgAVRd/ZpzDWqCsBAf/oHg7UggbAxyZa0E= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= @@ -536,6 +629,10 @@ github.com/xhofe/tache v0.1.3 h1:MipxzlljYX29E1YI/SLC7hVomVF+51iP1OUzlsuq1wE= github.com/xhofe/tache v0.1.3/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ= github.com/xhofe/wopan-sdk-go v0.1.3 h1:J58X6v+n25ewBZjb05pKOr7AWGohb+Rdll4CThGh6+A= github.com/xhofe/wopan-sdk-go v0.1.3/go.mod h1:dcY9yA28fnaoZPnXZiVTFSkcd7GnIPTpTIIlfSI5z5Q= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9 h1:K8gF0eekWPEX+57l30ixxzGhHH/qscI3JCnuhbN6V4M= +github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9/go.mod h1:9BnoKCcgJ/+SLhfAXj15352hTOuVmG5Gzo8xNRINfqI= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -545,6 +642,10 @@ github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22 h1:X+lH github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22/go.mod h1:1zGRDJd8zlG6P8azG96+uywfh6udYWwhOmUivw+xsuM= go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= go.etcd.io/bbolt 
v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= @@ -555,12 +656,16 @@ go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGX go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= +go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= gocv.io/x/gocv v0.25.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -576,11 +681,35 @@ golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.19.0 h1:D9FX4QWkLfkeqaC62SonffIIuYdOk/UE2XKUBgRIBIQ= golang.org/x/image v0.19.0/go.mod h1:y0zrRqlQRWQ5PXaYCOMLTW2fpsxZ8Qh9I/ohnInJEys= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -588,8 +717,19 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -600,6 +740,7 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= @@ -607,8 +748,17 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -619,10 +769,20 @@ golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -664,7 +824,9 @@ golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -679,6 +841,7 @@ golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/text v0.21.0 
h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= @@ -686,8 +849,30 @@ golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= @@ -700,12 +885,45 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T 
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= @@ -744,8 +962,16 @@ gorm.io/driver/sqlite v1.5.6/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDa gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.11 h1:/Wfyg1B/je1hnDx3sMkX+gAlxrlZpn6X0BXRlwXlvHg= gorm.io/gorm v1.25.11/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git 
a/internal/archive/all.go b/internal/archive/all.go new file mode 100644 index 00000000..18167933 --- /dev/null +++ b/internal/archive/all.go @@ -0,0 +1,7 @@ +package archive + +import ( + _ "github.com/alist-org/alist/v3/internal/archive/archives" + _ "github.com/alist-org/alist/v3/internal/archive/iso9660" + _ "github.com/alist-org/alist/v3/internal/archive/zip" +) diff --git a/internal/archive/archives/archives.go b/internal/archive/archives/archives.go new file mode 100644 index 00000000..b70ba95b --- /dev/null +++ b/internal/archive/archives/archives.go @@ -0,0 +1,126 @@ +package archives + +import ( + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/pkg/utils" + "io" + "io/fs" + "os" + stdpath "path" + "strings" +) + +type Archives struct { +} + +func (_ *Archives) AcceptedExtensions() []string { + return []string{ + ".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar", ".rar", ".7z", + } +} + +func (_ *Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + fsys, err := getFs(ss, args) + if err != nil { + return nil, err + } + _, err = fsys.ReadDir(".") + if err != nil { + return nil, filterPassword(err) + } + return &model.ArchiveMetaInfo{ + Comment: "", + Encrypted: false, + }, nil +} + +func (_ *Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + fsys, err := getFs(ss, args.ArchiveArgs) + if err != nil { + return nil, err + } + innerPath := strings.TrimPrefix(args.InnerPath, "/") + if innerPath == "" { + innerPath = "." 
+ } + obj, err := fsys.ReadDir(innerPath) + if err != nil { + return nil, filterPassword(err) + } + return utils.SliceConvert(obj, func(src os.DirEntry) (model.Obj, error) { + info, err := src.Info() + if err != nil { + return nil, err + } + return toModelObj(info), nil + }) +} + +func (_ *Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + fsys, err := getFs(ss, args.ArchiveArgs) + if err != nil { + return nil, 0, err + } + file, err := fsys.Open(strings.TrimPrefix(args.InnerPath, "/")) + if err != nil { + return nil, 0, filterPassword(err) + } + stat, err := file.Stat() + if err != nil { + return nil, 0, filterPassword(err) + } + return file, stat.Size(), nil +} + +func (_ *Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + fsys, err := getFs(ss, args.ArchiveArgs) + if err != nil { + return err + } + isDir := false + path := strings.TrimPrefix(args.InnerPath, "/") + if path == "" { + isDir = true + path = "." 
+ } else { + stat, err := fsys.Stat(path) + if err != nil { + return filterPassword(err) + } + if stat.IsDir() { + isDir = true + outputPath = stdpath.Join(outputPath, stat.Name()) + err = os.Mkdir(outputPath, 0700) + if err != nil { + return filterPassword(err) + } + } + } + if isDir { + err = fs.WalkDir(fsys, path, func(p string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + relPath := strings.TrimPrefix(p, path+"/") + dstPath := stdpath.Join(outputPath, relPath) + if d.IsDir() { + err = os.MkdirAll(dstPath, 0700) + } else { + dir := stdpath.Dir(dstPath) + err = decompress(fsys, p, dir, func(_ float64) {}) + } + return err + }) + } else { + err = decompress(fsys, path, outputPath, up) + } + return filterPassword(err) +} + +var _ tool.Tool = (*Archives)(nil) + +func init() { + tool.RegisterTool(&Archives{}) +} diff --git a/internal/archive/archives/utils.go b/internal/archive/archives/utils.go new file mode 100644 index 00000000..b72e6bc6 --- /dev/null +++ b/internal/archive/archives/utils.go @@ -0,0 +1,80 @@ +package archives + +import ( + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/mholt/archives" + "io" + fs2 "io/fs" + "os" + stdpath "path" + "strings" +) + +func getFs(ss *stream.SeekableStream, args model.ArchiveArgs) (*archives.ArchiveFS, error) { + reader, err := stream.NewReadAtSeeker(ss, 0) + if err != nil { + return nil, err + } + format, _, err := archives.Identify(ss.Ctx, ss.GetName(), reader) + if err != nil { + return nil, errs.UnknownArchiveFormat + } + extractor, ok := format.(archives.Extractor) + if !ok { + return nil, errs.UnknownArchiveFormat + } + switch f := format.(type) { + case archives.SevenZip: + f.Password = args.Password + case archives.Rar: + f.Password = args.Password + } + return &archives.ArchiveFS{ + Stream: io.NewSectionReader(reader, 0, ss.GetSize()), + Format: extractor, + Context: ss.Ctx, + 
}, nil +} + +func toModelObj(file os.FileInfo) *model.Object { + return &model.Object{ + Name: file.Name(), + Size: file.Size(), + Modified: file.ModTime(), + IsFolder: file.IsDir(), + } +} + +func filterPassword(err error) error { + if err != nil && strings.Contains(err.Error(), "password") { + return errs.WrongArchivePassword + } + return err +} + +func decompress(fsys fs2.FS, filePath, targetPath string, up model.UpdateProgress) error { + rc, err := fsys.Open(filePath) + if err != nil { + return err + } + defer rc.Close() + stat, err := rc.Stat() + if err != nil { + return err + } + f, err := os.OpenFile(stdpath.Join(targetPath, stat.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + defer f.Close() + _, err = io.Copy(f, &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: rc, + Size: stat.Size(), + }, + UpdateProgress: up, + }) + return err +} diff --git a/internal/archive/iso9660/iso9660.go b/internal/archive/iso9660/iso9660.go new file mode 100644 index 00000000..e9cb3f53 --- /dev/null +++ b/internal/archive/iso9660/iso9660.go @@ -0,0 +1,96 @@ +package iso9660 + +import ( + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/kdomanski/iso9660" + "io" + "os" + stdpath "path" +) + +type ISO9660 struct { +} + +func (t *ISO9660) AcceptedExtensions() []string { + return []string{".iso"} +} + +func (t *ISO9660) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + return &model.ArchiveMetaInfo{ + Comment: "", + Encrypted: false, + }, nil +} + +func (t *ISO9660) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + img, err := getImage(ss) + if err != nil { + return nil, err + } + dir, err := getObj(img, args.InnerPath) + if err != nil { + return nil, err + } + if 
!dir.IsDir() { + return nil, errs.NotFolder + } + children, err := dir.GetChildren() + if err != nil { + return nil, err + } + ret := make([]model.Obj, 0, len(children)) + for _, child := range children { + ret = append(ret, toModelObj(child)) + } + return ret, nil +} + +func (t *ISO9660) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + img, err := getImage(ss) + if err != nil { + return nil, 0, err + } + obj, err := getObj(img, args.InnerPath) + if err != nil { + return nil, 0, err + } + if obj.IsDir() { + return nil, 0, errs.NotFile + } + return io.NopCloser(obj.Reader()), obj.Size(), nil +} + +func (t *ISO9660) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + img, err := getImage(ss) + if err != nil { + return err + } + obj, err := getObj(img, args.InnerPath) + if err != nil { + return err + } + if obj.IsDir() { + if args.InnerPath != "/" { + outputPath = stdpath.Join(outputPath, obj.Name()) + if err = os.MkdirAll(outputPath, 0700); err != nil { + return err + } + } + var children []*iso9660.File + if children, err = obj.GetChildren(); err == nil { + err = decompressAll(children, outputPath) + } + } else { + err = decompress(obj, outputPath, up) + } + return err +} + +var _ tool.Tool = (*ISO9660)(nil) + +func init() { + tool.RegisterTool(&ISO9660{}) +} diff --git a/internal/archive/iso9660/utils.go b/internal/archive/iso9660/utils.go new file mode 100644 index 00000000..12de8e6e --- /dev/null +++ b/internal/archive/iso9660/utils.go @@ -0,0 +1,100 @@ +package iso9660 + +import ( + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/kdomanski/iso9660" + "io" + "os" + stdpath "path" + "strings" +) + +func getImage(ss *stream.SeekableStream) (*iso9660.Image, error) { + reader, err := stream.NewReadAtSeeker(ss, 0) + if err != nil { + return 
nil, err + } + return iso9660.OpenImage(reader) +} + +func getObj(img *iso9660.Image, path string) (*iso9660.File, error) { + obj, err := img.RootDir() + if err != nil { + return nil, err + } + if path == "/" { + return obj, nil + } + paths := strings.Split(strings.TrimPrefix(path, "/"), "/") + for _, p := range paths { + if !obj.IsDir() { + return nil, errs.ObjectNotFound + } + children, err := obj.GetChildren() + if err != nil { + return nil, err + } + exist := false + for _, child := range children { + if child.Name() == p { + obj = child + exist = true + break + } + } + if !exist { + return nil, errs.ObjectNotFound + } + } + return obj, nil +} + +func toModelObj(file *iso9660.File) model.Obj { + return &model.Object{ + Name: file.Name(), + Size: file.Size(), + Modified: file.ModTime(), + IsFolder: file.IsDir(), + } +} + +func decompress(f *iso9660.File, path string, up model.UpdateProgress) error { + file, err := os.OpenFile(stdpath.Join(path, f.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: f.Reader(), + Size: f.Size(), + }, + UpdateProgress: up, + }) + return err +} + +func decompressAll(children []*iso9660.File, path string) error { + for _, child := range children { + if child.IsDir() { + nextChildren, err := child.GetChildren() + if err != nil { + return err + } + nextPath := stdpath.Join(path, child.Name()) + if err = os.MkdirAll(nextPath, 0700); err != nil { + return err + } + if err = decompressAll(nextChildren, nextPath); err != nil { + return err + } + } else { + if err := decompress(child, path, func(_ float64) {}); err != nil { + return err + } + } + } + return nil +} diff --git a/internal/archive/tool/base.go b/internal/archive/tool/base.go new file mode 100644 index 00000000..08e96614 --- /dev/null +++ b/internal/archive/tool/base.go @@ -0,0 +1,15 @@ +package tool + +import ( + 
"github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "io" +) + +type Tool interface { + AcceptedExtensions() []string + GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) + List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) + Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) + Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error +} diff --git a/internal/archive/tool/utils.go b/internal/archive/tool/utils.go new file mode 100644 index 00000000..822ee894 --- /dev/null +++ b/internal/archive/tool/utils.go @@ -0,0 +1,23 @@ +package tool + +import ( + "github.com/alist-org/alist/v3/internal/errs" +) + +var ( + Tools = make(map[string]Tool) +) + +func RegisterTool(tool Tool) { + for _, ext := range tool.AcceptedExtensions() { + Tools[ext] = tool + } +} + +func GetArchiveTool(ext string) (Tool, error) { + t, ok := Tools[ext] + if !ok { + return nil, errs.UnknownArchiveFormat + } + return t, nil +} diff --git a/internal/archive/zip/utils.go b/internal/archive/zip/utils.go new file mode 100644 index 00000000..81b47782 --- /dev/null +++ b/internal/archive/zip/utils.go @@ -0,0 +1,156 @@ +package zip + +import ( + "bytes" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/saintfish/chardet" + "github.com/yeka/zip" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/japanese" + "golang.org/x/text/encoding/korean" + "golang.org/x/text/encoding/simplifiedchinese" + "golang.org/x/text/encoding/traditionalchinese" + "golang.org/x/text/encoding/unicode" + "golang.org/x/text/encoding/unicode/utf32" + "golang.org/x/text/transform" + "io" + "os" + stdpath "path" + "strings" +) + +func toModelObj(file os.FileInfo) 
*model.Object { + return &model.Object{ + Name: decodeName(file.Name()), + Size: file.Size(), + Modified: file.ModTime(), + IsFolder: file.IsDir(), + } +} + +func decompress(file *zip.File, filePath, outputPath, password string) error { + targetPath := outputPath + dir, base := stdpath.Split(filePath) + if dir != "" { + targetPath = stdpath.Join(targetPath, dir) + err := os.MkdirAll(targetPath, 0700) + if err != nil { + return err + } + } + if base != "" { + err := _decompress(file, targetPath, password, func(_ float64) {}) + if err != nil { + return err + } + } + return nil +} + +func _decompress(file *zip.File, targetPath, password string, up model.UpdateProgress) error { + if file.IsEncrypted() { + file.SetPassword(password) + } + rc, err := file.Open() + if err != nil { + return err + } + defer rc.Close() + f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + defer f.Close() + _, err = io.Copy(f, &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: rc, + Size: file.FileInfo().Size(), + }, + UpdateProgress: up, + }) + if err != nil { + return err + } + return nil +} + +func filterPassword(err error) error { + if err != nil && strings.Contains(err.Error(), "password") { + return errs.WrongArchivePassword + } + return err +} + +func decodeName(name string) string { + b := []byte(name) + detector := chardet.NewTextDetector() + result, err := detector.DetectBest(b) + if err != nil { + return name + } + enc := getEncoding(result.Charset) + if enc == nil { + return name + } + i := bytes.NewReader(b) + decoder := transform.NewReader(i, enc.NewDecoder()) + content, _ := io.ReadAll(decoder) + return string(content) +} + +func getEncoding(name string) (enc encoding.Encoding) { + switch name { + case "UTF-16BE": + enc = unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM) + case "UTF-16LE": + enc = unicode.UTF16(unicode.LittleEndian, 
unicode.IgnoreBOM) + case "UTF-32BE": + enc = utf32.UTF32(utf32.BigEndian, utf32.IgnoreBOM) + case "UTF-32LE": + enc = utf32.UTF32(utf32.LittleEndian, utf32.IgnoreBOM) + case "ISO-8859-1": + enc = charmap.ISO8859_1 + case "ISO-8859-2": + enc = charmap.ISO8859_2 + case "ISO-8859-3": + enc = charmap.ISO8859_3 + case "ISO-8859-4": + enc = charmap.ISO8859_4 + case "ISO-8859-5": + enc = charmap.ISO8859_5 + case "ISO-8859-6": + enc = charmap.ISO8859_6 + case "ISO-8859-7": + enc = charmap.ISO8859_7 + case "ISO-8859-8": + enc = charmap.ISO8859_8 + case "ISO-8859-8-I": + enc = charmap.ISO8859_8I + case "ISO-8859-9": + enc = charmap.ISO8859_9 + case "windows-1251": + enc = charmap.Windows1251 + case "windows-1256": + enc = charmap.Windows1256 + case "KOI8-R": + enc = charmap.KOI8R + case "Shift_JIS": + enc = japanese.ShiftJIS + case "GB-18030": + enc = simplifiedchinese.GB18030 + case "EUC-JP": + enc = japanese.EUCJP + case "EUC-KR": + enc = korean.EUCKR + case "Big5": + enc = traditionalchinese.Big5 + case "ISO-2022-JP": + enc = japanese.ISO2022JP + default: + enc = nil + } + return +} diff --git a/internal/archive/zip/zip.go b/internal/archive/zip/zip.go new file mode 100644 index 00000000..ccb70e65 --- /dev/null +++ b/internal/archive/zip/zip.go @@ -0,0 +1,174 @@ +package zip + +import ( + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/yeka/zip" + "io" + "os" + stdpath "path" + "strings" +) + +type Zip struct { +} + +func (_ *Zip) AcceptedExtensions() []string { + return []string{".zip"} +} + +func (_ *Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + reader, err := stream.NewReadAtSeeker(ss, 0) + if err != nil { + return nil, err + } + zipReader, err := zip.NewReader(reader, ss.GetSize()) + if err != nil { + return nil, err + } + encrypted := false + for _, file 
:= range zipReader.File { + if file.IsEncrypted() { + encrypted = true + break + } + } + return &model.ArchiveMetaInfo{ + Comment: zipReader.Comment, + Encrypted: encrypted, + }, nil +} + +func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + reader, err := stream.NewReadAtSeeker(ss, 0) + if err != nil { + return nil, err + } + zipReader, err := zip.NewReader(reader, ss.GetSize()) + if err != nil { + return nil, err + } + if args.InnerPath == "/" { + ret := make([]model.Obj, 0) + passVerified := false + for _, file := range zipReader.File { + if !passVerified && file.IsEncrypted() { + file.SetPassword(args.Password) + rc, e := file.Open() + if e != nil { + return nil, filterPassword(e) + } + _ = rc.Close() + passVerified = true + } + name := decodeName(file.Name) + if strings.Contains(strings.TrimSuffix(name, "/"), "/") { + continue + } + ret = append(ret, toModelObj(file.FileInfo())) + } + return ret, nil + } else { + innerPath := strings.TrimPrefix(args.InnerPath, "/") + "/" + ret := make([]model.Obj, 0) + exist := false + for _, file := range zipReader.File { + name := decodeName(file.Name) + if name == innerPath { + exist = true + } + dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/" + if dir != innerPath { + continue + } + ret = append(ret, toModelObj(file.FileInfo())) + } + if !exist { + return nil, errs.ObjectNotFound + } + return ret, nil + } +} + +func (_ *Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + reader, err := stream.NewReadAtSeeker(ss, 0) + if err != nil { + return nil, 0, err + } + zipReader, err := zip.NewReader(reader, ss.GetSize()) + if err != nil { + return nil, 0, err + } + innerPath := strings.TrimPrefix(args.InnerPath, "/") + for _, file := range zipReader.File { + if decodeName(file.Name) == innerPath { + if file.IsEncrypted() { + file.SetPassword(args.Password) + } + r, e := file.Open() + if e != nil { + return nil, 0, e + } + 
return r, file.FileInfo().Size(), nil + } + } + return nil, 0, errs.ObjectNotFound +} + +func (_ *Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + reader, err := stream.NewReadAtSeeker(ss, 0) + if err != nil { + return err + } + zipReader, err := zip.NewReader(reader, ss.GetSize()) + if err != nil { + return err + } + if args.InnerPath == "/" { + for i, file := range zipReader.File { + name := decodeName(file.Name) + err = decompress(file, name, outputPath, args.Password) + if err != nil { + return err + } + up(float64(i+1) * 100.0 / float64(len(zipReader.File))) + } + } else { + innerPath := strings.TrimPrefix(args.InnerPath, "/") + innerBase := stdpath.Base(innerPath) + createdBaseDir := false + for _, file := range zipReader.File { + name := decodeName(file.Name) + if name == innerPath { + err = _decompress(file, outputPath, args.Password, up) + if err != nil { + return err + } + break + } else if strings.HasPrefix(name, innerPath+"/") { + targetPath := stdpath.Join(outputPath, innerBase) + if !createdBaseDir { + err = os.Mkdir(targetPath, 0700) + if err != nil { + return err + } + createdBaseDir = true + } + restPath := strings.TrimPrefix(name, innerPath+"/") + err = decompress(file, restPath, targetPath, args.Password) + if err != nil { + return err + } + } + } + } + return nil +} + +var _ tool.Tool = (*Zip)(nil) + +func init() { + tool.RegisterTool(&Zip{}) +} diff --git a/internal/bootstrap/data/user.go b/internal/bootstrap/data/user.go index 5b596a85..9c3f8962 100644 --- a/internal/bootstrap/data/user.go +++ b/internal/bootstrap/data/user.go @@ -26,13 +26,14 @@ func initUser() { if errors.Is(err, gorm.ErrRecordNotFound) { salt := random.String(16) admin = &model.User{ - Username: "admin", - Salt: salt, - PwdHash: model.TwoHashPwd(adminPassword, salt), - Role: model.ADMIN, - BasePath: "/", - Authn: "[]", - Permission: 0xFF, // 0(can see hidden) - 7(can remove) + Username: "admin", + 
Salt: salt, + PwdHash: model.TwoHashPwd(adminPassword, salt), + Role: model.ADMIN, + BasePath: "/", + Authn: "[]", + // 0(can see hidden) - 7(can remove) & 12(can read archives) - 13(can decompress archives) + Permission: 0x30FF, } if err := op.CreateUser(admin); err != nil { panic(err) diff --git a/internal/bootstrap/patch/v3_41_0/grant_permission.go b/internal/bootstrap/patch/v3_41_0/grant_permission.go index d658d184..e62d1e8f 100644 --- a/internal/bootstrap/patch/v3_41_0/grant_permission.go +++ b/internal/bootstrap/patch/v3_41_0/grant_permission.go @@ -5,18 +5,20 @@ import ( "github.com/alist-org/alist/v3/pkg/utils" ) -// GrantAdminPermissions gives admin Permission 0(can see hidden) - 9(webdav manage) -// This patch is written to help users upgrading from older version better adapt to PR AlistGo/alist#7705. +// GrantAdminPermissions gives admin Permission 0(can see hidden) - 9(webdav manage) and +// 12(can read archives) - 13(can decompress archives) +// This patch is written to help users upgrading from older version better adapt to PR AlistGo/alist#7705 and +// PR AlistGo/alist#7817. 
func GrantAdminPermissions() { admin, err := op.GetAdmin() if err != nil { utils.Log.Errorf("Cannot grant permissions to admin: %v", err) } - if (admin.Permission & 0x3FF) == 0 { - admin.Permission |= 0x3FF - } - err = op.UpdateUser(admin) - if err != nil { - utils.Log.Errorf("Cannot grant permissions to admin: %v", err) + if (admin.Permission & 0x33FF) == 0 { + admin.Permission |= 0x33FF + err = op.UpdateUser(admin) + if err != nil { + utils.Log.Errorf("Cannot grant permissions to admin: %v", err) + } } } diff --git a/internal/bootstrap/task.go b/internal/bootstrap/task.go index 33902353..9c30c392 100644 --- a/internal/bootstrap/task.go +++ b/internal/bootstrap/task.go @@ -16,4 +16,6 @@ func InitTaskManager() { if len(tool.TransferTaskManager.GetAll()) == 0 { //prevent offline downloaded files from being deleted CleanTempDir() } + fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](tache.WithWorks(conf.Conf.Tasks.Decompress.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant), db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry)) + fs.ArchiveContentUploadTaskManager.Manager = tache.NewManager[*fs.ArchiveContentUploadTask](tache.WithWorks(conf.Conf.Tasks.DecompressUpload.Workers), tache.WithMaxRetry(conf.Conf.Tasks.DecompressUpload.MaxRetry)) //decompress upload will not support persist } diff --git a/internal/conf/config.go b/internal/conf/config.go index d015cda0..4f5c2ae0 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -53,10 +53,12 @@ type TaskConfig struct { } type TasksConfig struct { - Download TaskConfig `json:"download" envPrefix:"DOWNLOAD_"` - Transfer TaskConfig `json:"transfer" envPrefix:"TRANSFER_"` - Upload TaskConfig `json:"upload" envPrefix:"UPLOAD_"` - Copy TaskConfig `json:"copy" envPrefix:"COPY_"` + Download TaskConfig `json:"download" envPrefix:"DOWNLOAD_"` + 
Transfer TaskConfig `json:"transfer" envPrefix:"TRANSFER_"` + Upload TaskConfig `json:"upload" envPrefix:"UPLOAD_"` + Copy TaskConfig `json:"copy" envPrefix:"COPY_"` + Decompress TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"` + DecompressUpload TaskConfig `json:"decompress_upload" envPrefix:"DECOMPRESS_UPLOAD_"` } type Cors struct { @@ -169,6 +171,15 @@ func DefaultConfig() *Config { MaxRetry: 2, // TaskPersistant: true, }, + Decompress: TaskConfig{ + Workers: 5, + MaxRetry: 2, + // TaskPersistant: true, + }, + DecompressUpload: TaskConfig{ + Workers: 5, + MaxRetry: 2, + }, }, Cors: Cors{ AllowOrigins: []string{"*"}, diff --git a/internal/driver/driver.go b/internal/driver/driver.go index 4571110a..09fd42e7 100644 --- a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -123,7 +123,43 @@ type PutURLResult interface { PutURL(ctx context.Context, dstDir model.Obj, name, url string) (model.Obj, error) } -type UpdateProgress func(percentage float64) +type ArchiveReader interface { + // GetArchiveMeta get the meta-info of an archive + // return errs.WrongArchivePassword if the meta-info is also encrypted but provided password is wrong or empty + // return errs.NotImplement to use internal archive tools to get the meta-info, such as the following cases: + // 1. the driver do not support the format of the archive but there may be an internal tool do + // 2. 
handling archives is a VIP feature, but the driver does not have VIP access + GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) + // ListArchive list the children of model.ArchiveArgs.InnerPath in the archive + // return errs.NotImplement to use internal archive tools to list the children + // return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree + ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) + // Extract get url/filepath/reader of a file in the archive + // return errs.NotImplement to use internal archive tools to extract + Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) +} + +type ArchiveGetter interface { + // ArchiveGet get file by inner path + // return errs.NotImplement to use internal archive tools to get the children + // return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree + ArchiveGet(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (model.Obj, error) +} + +type ArchiveDecompress interface { + ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error +} + +type ArchiveDecompressResult interface { + // ArchiveDecompress decompress an archive + // when args.PutIntoNewDir, the new sub-folder should be named the same to the archive but without the extension + // return each decompressed obj from the root path of the archive when args.PutIntoNewDir is false + // return only the newly created folder when args.PutIntoNewDir is true + // return errs.NotImplement to use internal archive tools to decompress + ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) +} + +type UpdateProgress model.UpdateProgress type Progress struct { Total int64 diff --git a/internal/errs/errors.go b/internal/errs/errors.go 
index ecfe43e3..2a22dca1 100644 --- a/internal/errs/errors.go +++ b/internal/errs/errors.go @@ -19,6 +19,10 @@ var ( StorageNotFound = errors.New("storage not found") StreamIncomplete = errors.New("upload/download stream incomplete, possible network issue") StreamPeekFail = errors.New("StreamPeekFail") + + UnknownArchiveFormat = errors.New("unknown archive format") + WrongArchivePassword = errors.New("wrong archive password") + DriverExtractNotSupported = errors.New("driver extraction not supported") ) // NewErr wrap constant error with an extra message diff --git a/internal/fs/archive.go b/internal/fs/archive.go new file mode 100644 index 00000000..f3e05926 --- /dev/null +++ b/internal/fs/archive.go @@ -0,0 +1,395 @@ +package fs + +import ( + "context" + stderrors "errors" + "fmt" + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/internal/task" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/xhofe/tache" + "io" + "math/rand" + "mime" + "net/http" + "os" + stdpath "path" + "path/filepath" + "strconv" + "strings" + "time" +) + +type ArchiveDownloadTask struct { + task.TaskExtension + model.ArchiveDecompressArgs + status string + SrcObjPath string + DstDirPath string + srcStorage driver.Driver + dstStorage driver.Driver + SrcStorageMp string + DstStorageMp string + Tool tool.Tool +} + +func (t *ArchiveDownloadTask) GetName() string { + return fmt.Sprintf("decompress [%s](%s)[%s] to [%s](%s) with password <%s>", t.SrcStorageMp, t.SrcObjPath, + t.InnerPath, t.DstStorageMp, t.DstDirPath, t.Password) +} + +func (t *ArchiveDownloadTask) GetStatus() string { + return t.status +} + +func (t *ArchiveDownloadTask) Run() error 
{ + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() + uploadTask, err := t.RunWithoutPushUploadTask() + if err != nil { + return err + } + ArchiveContentUploadTaskManager.Add(uploadTask) + return nil +} + +func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadTask, error) { + var err error + if t.srcStorage == nil { + t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp) + } + l, srcObj, err := op.Link(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{ + Header: http.Header{}, + }) + if err != nil { + return nil, err + } + fs := stream.FileStream{ + Obj: srcObj, + Ctx: t.Ctx(), + } + ss, err := stream.NewSeekableStream(fs, l) + if err != nil { + return nil, err + } + defer func() { + if err := ss.Close(); err != nil { + log.Errorf("failed to close file streamer, %v", err) + } + }() + var decompressUp model.UpdateProgress + if t.CacheFull { + t.SetTotalBytes(srcObj.GetSize()) + t.status = "getting src object" + _, err = ss.CacheFullInTempFileAndUpdateProgress(t.SetProgress) + if err != nil { + return nil, err + } + decompressUp = func(_ float64) {} + } else { + decompressUp = t.SetProgress + } + t.status = "walking and decompressing" + dir, err := os.MkdirTemp(conf.Conf.TempDir, "dir-*") + if err != nil { + return nil, err + } + err = t.Tool.Decompress(ss, dir, t.ArchiveInnerArgs, decompressUp) + if err != nil { + return nil, err + } + baseName := strings.TrimSuffix(srcObj.GetName(), stdpath.Ext(srcObj.GetName())) + uploadTask := &ArchiveContentUploadTask{ + TaskExtension: task.TaskExtension{ + Creator: t.GetCreator(), + }, + ObjName: baseName, + InPlace: !t.PutIntoNewDir, + FilePath: dir, + DstDirPath: t.DstDirPath, + dstStorage: t.dstStorage, + DstStorageMp: t.DstStorageMp, + } + return uploadTask, nil +} + +var ArchiveDownloadTaskManager *tache.Manager[*ArchiveDownloadTask] + +type ArchiveContentUploadTask struct { + task.TaskExtension + status string + ObjName string + InPlace bool + 
FilePath string + DstDirPath string + dstStorage driver.Driver + DstStorageMp string + finalized bool +} + +func (t *ArchiveContentUploadTask) GetName() string { + return fmt.Sprintf("upload %s to [%s](%s)", t.ObjName, t.DstStorageMp, t.DstDirPath) +} + +func (t *ArchiveContentUploadTask) GetStatus() string { + return t.status +} + +func (t *ArchiveContentUploadTask) Run() error { + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() + return t.RunWithNextTaskCallback(func(nextTsk *ArchiveContentUploadTask) error { + ArchiveContentUploadTaskManager.Add(nextTsk) + return nil + }) +} + +func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTsk *ArchiveContentUploadTask) error) error { + var err error + if t.dstStorage == nil { + t.dstStorage, err = op.GetStorageByMountPath(t.DstStorageMp) + } + info, err := os.Stat(t.FilePath) + if err != nil { + return err + } + if info.IsDir() { + t.status = "src object is dir, listing objs" + nextDstPath := t.DstDirPath + if !t.InPlace { + nextDstPath = stdpath.Join(nextDstPath, t.ObjName) + err = op.MakeDir(t.Ctx(), t.dstStorage, nextDstPath) + if err != nil { + return err + } + } + entries, err := os.ReadDir(t.FilePath) + if err != nil { + return err + } + var es error + for _, entry := range entries { + var nextFilePath string + if entry.IsDir() { + nextFilePath, err = moveToTempPath(stdpath.Join(t.FilePath, entry.Name()), "dir-") + } else { + nextFilePath, err = moveToTempPath(stdpath.Join(t.FilePath, entry.Name()), "file-") + } + if err != nil { + es = stderrors.Join(es, err) + continue + } + err = f(&ArchiveContentUploadTask{ + TaskExtension: task.TaskExtension{ + Creator: t.GetCreator(), + }, + ObjName: entry.Name(), + InPlace: false, + FilePath: nextFilePath, + DstDirPath: nextDstPath, + dstStorage: t.dstStorage, + DstStorageMp: t.DstStorageMp, + }) + if err != nil { + es = stderrors.Join(es, err) + } + } + if es != nil { + return es + } + } else { + 
t.SetTotalBytes(info.Size()) + file, err := os.Open(t.FilePath) + if err != nil { + return err + } + fs := &stream.FileStream{ + Obj: &model.Object{ + Name: t.ObjName, + Size: info.Size(), + Modified: time.Now(), + }, + Mimetype: mime.TypeByExtension(filepath.Ext(t.ObjName)), + WebPutAsTask: true, + Reader: file, + } + fs.Closers.Add(file) + t.status = "uploading" + err = op.Put(t.Ctx(), t.dstStorage, t.DstDirPath, fs, t.SetProgress, true) + if err != nil { + return err + } + } + t.deleteSrcFile() + return nil +} + +func (t *ArchiveContentUploadTask) Cancel() { + t.TaskExtension.Cancel() + t.deleteSrcFile() +} + +func (t *ArchiveContentUploadTask) deleteSrcFile() { + if !t.finalized { + _ = os.RemoveAll(t.FilePath) + t.finalized = true + } +} + +func moveToTempPath(path, prefix string) (string, error) { + newPath, err := genTempFileName(prefix) + if err != nil { + return "", err + } + err = os.Rename(path, newPath) + if err != nil { + return "", err + } + return newPath, nil +} + +func genTempFileName(prefix string) (string, error) { + retry := 0 + for retry < 10000 { + newPath := stdpath.Join(conf.Conf.TempDir, prefix+strconv.FormatUint(uint64(rand.Uint32()), 10)) + if _, err := os.Stat(newPath); err != nil { + if os.IsNotExist(err) { + return newPath, nil + } else { + return "", err + } + } + retry++ + } + return "", errors.New("failed to generate temp-file name: too many retries") +} + +type archiveContentUploadTaskManagerType struct { + *tache.Manager[*ArchiveContentUploadTask] +} + +func (m *archiveContentUploadTaskManagerType) Remove(id string) { + if t, ok := m.GetByID(id); ok { + t.deleteSrcFile() + m.Manager.Remove(id) + } +} + +func (m *archiveContentUploadTaskManagerType) RemoveAll() { + tasks := m.GetAll() + for _, t := range tasks { + m.Remove(t.GetID()) + } +} + +func (m *archiveContentUploadTaskManagerType) RemoveByState(state ...tache.State) { + tasks := m.GetByState(state...) 
+ for _, t := range tasks { + m.Remove(t.GetID()) + } +} + +func (m *archiveContentUploadTaskManagerType) RemoveByCondition(condition func(task *ArchiveContentUploadTask) bool) { + tasks := m.GetByCondition(condition) + for _, t := range tasks { + m.Remove(t.GetID()) + } +} + +var ArchiveContentUploadTaskManager = &archiveContentUploadTaskManagerType{ + Manager: nil, +} + +func archiveMeta(ctx context.Context, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) { + storage, actualPath, err := op.GetStorageAndActualPath(path) + if err != nil { + return nil, errors.WithMessage(err, "failed get storage") + } + return op.GetArchiveMeta(ctx, storage, actualPath, args) +} + +func archiveList(ctx context.Context, path string, args model.ArchiveListArgs) ([]model.Obj, error) { + storage, actualPath, err := op.GetStorageAndActualPath(path) + if err != nil { + return nil, errors.WithMessage(err, "failed get storage") + } + return op.ListArchive(ctx, storage, actualPath, args) +} + +func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) (task.TaskExtensionInfo, error) { + srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(srcObjPath) + if err != nil { + return nil, errors.WithMessage(err, "failed get src storage") + } + dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath) + if err != nil { + return nil, errors.WithMessage(err, "failed get dst storage") + } + if srcStorage.GetStorage() == dstStorage.GetStorage() { + err = op.ArchiveDecompress(ctx, srcStorage, srcObjActualPath, dstDirActualPath, args, lazyCache...) 
+ if !errors.Is(err, errs.NotImplement) { + return nil, err + } + } + ext := stdpath.Ext(srcObjActualPath) + t, err := tool.GetArchiveTool(ext) + if err != nil { + return nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext) + } + taskCreator, _ := ctx.Value("user").(*model.User) + tsk := &ArchiveDownloadTask{ + TaskExtension: task.TaskExtension{ + Creator: taskCreator, + }, + ArchiveDecompressArgs: args, + srcStorage: srcStorage, + dstStorage: dstStorage, + SrcObjPath: srcObjActualPath, + DstDirPath: dstDirActualPath, + SrcStorageMp: srcStorage.GetStorage().MountPath, + DstStorageMp: dstStorage.GetStorage().MountPath, + Tool: t, + } + if ctx.Value(conf.NoTaskKey) != nil { + uploadTask, err := tsk.RunWithoutPushUploadTask() + if err != nil { + return nil, errors.WithMessagef(err, "failed download [%s]", srcObjPath) + } + defer uploadTask.deleteSrcFile() + var callback func(t *ArchiveContentUploadTask) error + callback = func(t *ArchiveContentUploadTask) error { + e := t.RunWithNextTaskCallback(callback) + t.deleteSrcFile() + return e + } + return nil, uploadTask.RunWithNextTaskCallback(callback) + } else { + ArchiveDownloadTaskManager.Add(tsk) + return tsk, nil + } +} + +func archiveDriverExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) { + storage, actualPath, err := op.GetStorageAndActualPath(path) + if err != nil { + return nil, nil, errors.WithMessage(err, "failed get storage") + } + return op.DriverExtract(ctx, storage, actualPath, args) +} + +func archiveInternalExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + storage, actualPath, err := op.GetStorageAndActualPath(path) + if err != nil { + return nil, 0, errors.WithMessage(err, "failed get storage") + } + return op.InternalExtract(ctx, storage, actualPath, args) +} diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 24f1d47f..a873f917 100644 --- a/internal/fs/fs.go +++ 
b/internal/fs/fs.go @@ -7,6 +7,7 @@ import ( "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/task" log "github.com/sirupsen/logrus" + "io" ) // the param named path of functions in this package is a mount path @@ -109,6 +110,46 @@ func PutAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) return t, err } +func ArchiveMeta(ctx context.Context, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) { + meta, err := archiveMeta(ctx, path, args) + if err != nil { + log.Errorf("failed get archive meta %s: %+v", path, err) + } + return meta, err +} + +func ArchiveList(ctx context.Context, path string, args model.ArchiveListArgs) ([]model.Obj, error) { + objs, err := archiveList(ctx, path, args) + if err != nil { + log.Errorf("failed list archive [%s]%s: %+v", path, args.InnerPath, err) + } + return objs, err +} + +func ArchiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) (task.TaskExtensionInfo, error) { + t, err := archiveDecompress(ctx, srcObjPath, dstDirPath, args, lazyCache...) 
+ if err != nil { + log.Errorf("failed decompress [%s]%s: %+v", srcObjPath, args.InnerPath, err) + } + return t, err +} + +func ArchiveDriverExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) { + l, obj, err := archiveDriverExtract(ctx, path, args) + if err != nil { + log.Errorf("failed extract [%s]%s: %+v", path, args.InnerPath, err) + } + return l, obj, err +} + +func ArchiveInternalExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + l, obj, err := archiveInternalExtract(ctx, path, args) + if err != nil { + log.Errorf("failed extract [%s]%s: %+v", path, args.InnerPath, err) + } + return l, obj, err +} + type GetStoragesArgs struct { } diff --git a/internal/model/archive.go b/internal/model/archive.go new file mode 100644 index 00000000..03ac7c36 --- /dev/null +++ b/internal/model/archive.go @@ -0,0 +1,49 @@ +package model + +type ObjTree interface { + Obj + GetChildren() []ObjTree +} + +type ObjectTree struct { + Object + Children []ObjTree +} + +func (t *ObjectTree) GetChildren() []ObjTree { + return t.Children +} + +type ArchiveMeta interface { + GetComment() string + // IsEncrypted means if the content of the archive requires a password to access + // GetArchiveMeta should return errs.WrongArchivePassword if the meta-info is also encrypted, + // and the provided password is empty. 
+ IsEncrypted() bool + // GetTree directly returns the full folder structure + // returns nil if the folder structure should be acquired by calling driver.ArchiveReader.ListArchive + GetTree() []ObjTree +} + +type ArchiveMetaInfo struct { + Comment string + Encrypted bool + Tree []ObjTree +} + +func (m *ArchiveMetaInfo) GetComment() string { + return m.Comment +} + +func (m *ArchiveMetaInfo) IsEncrypted() bool { + return m.Encrypted +} + +func (m *ArchiveMetaInfo) GetTree() []ObjTree { + return m.Tree +} + +type ArchiveMetaProvider struct { + ArchiveMeta + DriverProviding bool +} diff --git a/internal/model/args.go b/internal/model/args.go index 613699b9..a9feeb20 100644 --- a/internal/model/args.go +++ b/internal/model/args.go @@ -48,6 +48,33 @@ type FsOtherArgs struct { Method string `json:"method" form:"method"` Data interface{} `json:"data" form:"data"` } + +type ArchiveArgs struct { + Password string + LinkArgs +} + +type ArchiveInnerArgs struct { + ArchiveArgs + InnerPath string +} + +type ArchiveMetaArgs struct { + ArchiveArgs + Refresh bool +} + +type ArchiveListArgs struct { + ArchiveInnerArgs + Refresh bool +} + +type ArchiveDecompressArgs struct { + ArchiveInnerArgs + CacheFull bool + PutIntoNewDir bool +} + type RangeReadCloserIF interface { RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) utils.ClosersIF diff --git a/internal/model/obj.go b/internal/model/obj.go index 122fb546..2a72ca9e 100644 --- a/internal/model/obj.go +++ b/internal/model/obj.go @@ -48,8 +48,11 @@ type FileStreamer interface { RangeRead(http_range.Range) (io.Reader, error) //for a non-seekable Stream, if Read is called, this function won't work CacheFullInTempFile() (File, error) + CacheFullInTempFileAndUpdateProgress(up UpdateProgress) (File, error) } +type UpdateProgress func(percentage float64) + type URL interface { URL() string } diff --git a/internal/model/user.go b/internal/model/user.go index f75fc687..eaa0fed9 100644 --- 
a/internal/model/user.go +++ b/internal/model/user.go @@ -44,6 +44,8 @@ type User struct { // 9: webdav write // 10: ftp/sftp login and read // 11: ftp/sftp write + // 12: can read archives + // 13: can decompress archives Permission int32 `json:"permission"` OtpSecret string `json:"-"` SsoID string `json:"sso_id"` // unique by sso platform @@ -127,6 +129,14 @@ func (u *User) CanFTPManage() bool { return (u.Permission>>11)&1 == 1 } +func (u *User) CanReadArchives() bool { + return (u.Permission>>12)&1 == 1 +} + +func (u *User) CanDecompress() bool { + return (u.Permission>>13)&1 == 1 +} + func (u *User) JoinPath(reqPath string) (string, error) { return utils.JoinBasePath(u.BasePath, reqPath) } diff --git a/internal/op/archive.go b/internal/op/archive.go new file mode 100644 index 00000000..6a9fa084 --- /dev/null +++ b/internal/op/archive.go @@ -0,0 +1,424 @@ +package op + +import ( + "context" + stderrors "errors" + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/stream" + "io" + stdpath "path" + "strings" + "time" + + "github.com/Xhofe/go-cache" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/singleflight" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +var archiveMetaCache = cache.NewMemCache(cache.WithShards[*model.ArchiveMetaProvider](64)) +var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider] + +func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + path = utils.FixAndCleanPath(path) + key := Key(storage, path) + if !args.Refresh { + if meta, ok := 
archiveMetaCache.Get(key); ok { + log.Debugf("use cache when get %s archive meta", path) + return meta, nil + } + } + fn := func() (*model.ArchiveMetaProvider, error) { + _, m, err := getArchiveMeta(ctx, storage, path, args) + if err != nil { + return nil, errors.Wrapf(err, "failed to get %s archive met: %+v", path, err) + } + if !storage.Config().NoCache { + archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) + } + return m, nil + } + if storage.Config().OnlyLocal { + meta, err := fn() + return meta, err + } + meta, err, _ := archiveMetaG.Do(key, fn) + return meta, err +} + +func getArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, *stream.SeekableStream, error) { + l, obj, err := Link(ctx, storage, path, args) + if err != nil { + return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path) + } + ext := stdpath.Ext(obj.GetName()) + t, err := tool.GetArchiveTool(ext) + if err != nil { + return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext) + } + ss, err := stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: obj}, l) + if err != nil { + return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path) + } + return obj, t, ss, nil +} + +func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) { + storageAr, ok := storage.(driver.ArchiveReader) + if ok { + obj, err := GetUnwrap(ctx, storage, path) + if err != nil { + return nil, nil, errors.WithMessage(err, "failed to get file") + } + if obj.IsDir() { + return nil, nil, errors.WithStack(errs.NotFile) + } + meta, err := storageAr.GetArchiveMeta(ctx, obj, args.ArchiveArgs) + if !errors.Is(err, errs.NotImplement) { + return obj, &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true}, err + } + } + 
obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs) + if err != nil { + return nil, nil, err + } + defer func() { + if err := ss.Close(); err != nil { + log.Errorf("failed to close file streamer, %v", err) + } + }() + meta, err := t.GetMeta(ss, args.ArchiveArgs) + return obj, &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: false}, err +} + +var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64)) +var archiveListG singleflight.Group[[]model.Obj] + +func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + path = utils.FixAndCleanPath(path) + metaKey := Key(storage, path) + key := stdpath.Join(metaKey, args.InnerPath) + if !args.Refresh { + if files, ok := archiveListCache.Get(key); ok { + log.Debugf("use cache when list archive [%s]%s", path, args.InnerPath) + return files, nil + } + if meta, ok := archiveMetaCache.Get(metaKey); ok { + log.Debugf("use meta cache when list archive [%s]%s", path, args.InnerPath) + return getChildrenFromArchiveMeta(meta, args.InnerPath) + } + } + objs, err, _ := archiveListG.Do(key, func() ([]model.Obj, error) { + obj, files, err := listArchive(ctx, storage, path, args) + if err != nil { + return nil, errors.Wrapf(err, "failed to list archive [%s]%s: %+v", path, args.InnerPath, err) + } + // set path + for _, f := range files { + if s, ok := f.(model.SetPath); ok && f.GetPath() == "" && obj.GetPath() != "" { + s.SetPath(stdpath.Join(obj.GetPath(), args.InnerPath, f.GetName())) + } + } + // warp obj name + model.WrapObjsName(files) + // sort objs + if storage.Config().LocalSort { + model.SortFiles(files, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection) + } + model.ExtractFolder(files, storage.GetStorage().ExtractFolder) + if 
!storage.Config().NoCache { + if len(files) > 0 { + log.Debugf("set cache: %s => %+v", key, files) + archiveListCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) + } else { + log.Debugf("del cache: %s", key) + archiveListCache.Del(key) + } + } + return files, nil + }) + return objs, err +} + +func _listArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, []model.Obj, error) { + storageAr, ok := storage.(driver.ArchiveReader) + if ok { + obj, err := GetUnwrap(ctx, storage, path) + if err != nil { + return nil, nil, errors.WithMessage(err, "failed to get file") + } + if obj.IsDir() { + return nil, nil, errors.WithStack(errs.NotFile) + } + files, err := storageAr.ListArchive(ctx, obj, args.ArchiveInnerArgs) + if !errors.Is(err, errs.NotImplement) { + return obj, files, err + } + } + obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs) + if err != nil { + return nil, nil, err + } + defer func() { + if err := ss.Close(); err != nil { + log.Errorf("failed to close file streamer, %v", err) + } + }() + files, err := t.List(ss, args.ArchiveInnerArgs) + return obj, files, err +} + +func listArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, []model.Obj, error) { + obj, files, err := _listArchive(ctx, storage, path, args) + if errors.Is(err, errs.NotSupport) { + var meta model.ArchiveMeta + meta, err = GetArchiveMeta(ctx, storage, path, model.ArchiveMetaArgs{ + ArchiveArgs: args.ArchiveArgs, + Refresh: args.Refresh, + }) + if err != nil { + return nil, nil, err + } + files, err = getChildrenFromArchiveMeta(meta, args.InnerPath) + if err != nil { + return nil, nil, err + } + } + if err == nil && obj == nil { + obj, err = GetUnwrap(ctx, storage, path) + } + if err != nil { + return nil, nil, err + } + return obj, files, err +} + +func getChildrenFromArchiveMeta(meta model.ArchiveMeta, 
innerPath string) ([]model.Obj, error) { + obj := meta.GetTree() + if obj == nil { + return nil, errors.WithStack(errs.NotImplement) + } + dirs := splitPath(innerPath) + for _, dir := range dirs { + var next model.ObjTree + for _, c := range obj { + if c.GetName() == dir { + next = c + break + } + } + if next == nil { + return nil, errors.WithStack(errs.ObjectNotFound) + } + if !next.IsDir() || next.GetChildren() == nil { + return nil, errors.WithStack(errs.NotFolder) + } + obj = next.GetChildren() + } + return utils.SliceConvert(obj, func(src model.ObjTree) (model.Obj, error) { + return src, nil + }) +} + +func splitPath(path string) []string { + var parts []string + for { + dir, file := stdpath.Split(path) + if file == "" { + break + } + parts = append([]string{file}, parts...) + path = strings.TrimSuffix(dir, "/") + } + return parts +} + +func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, model.Obj, error) { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + path = utils.FixAndCleanPath(path) + af, err := GetUnwrap(ctx, storage, path) + if err != nil { + return nil, nil, errors.WithMessage(err, "failed to get file") + } + if af.IsDir() { + return nil, nil, errors.WithStack(errs.NotFile) + } + if g, ok := storage.(driver.ArchiveGetter); ok { + obj, err := g.ArchiveGet(ctx, af, args.ArchiveInnerArgs) + if err == nil { + return af, model.WrapObjName(obj), nil + } + } + + if utils.PathEqual(args.InnerPath, "/") { + return af, &model.ObjWrapName{ + Name: RootName, + Obj: &model.Object{ + Name: af.GetName(), + Path: af.GetPath(), + ID: af.GetID(), + Size: af.GetSize(), + Modified: af.ModTime(), + IsFolder: true, + }, + }, nil + } + + innerDir, name := stdpath.Split(args.InnerPath) + args.InnerPath = strings.TrimSuffix(innerDir, "/") + files, err := ListArchive(ctx, storage, path, args) + if 
err != nil { + return nil, nil, errors.WithMessage(err, "failed get parent list") + } + for _, f := range files { + if f.GetName() == name { + return af, f, nil + } + } + return nil, nil, errors.WithStack(errs.ObjectNotFound) +} + +type extractLink struct { + Link *model.Link + Obj model.Obj +} + +var extractCache = cache.NewMemCache(cache.WithShards[*extractLink](16)) +var extractG singleflight.Group[*extractLink] + +func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + key := stdpath.Join(Key(storage, path), args.InnerPath) + if link, ok := extractCache.Get(key); ok { + return link.Link, link.Obj, nil + } else if link, ok := extractCache.Get(key + ":" + args.IP); ok { + return link.Link, link.Obj, nil + } + fn := func() (*extractLink, error) { + link, err := driverExtract(ctx, storage, path, args) + if err != nil { + return nil, errors.Wrapf(err, "failed extract archive") + } + if link.Link.Expiration != nil { + if link.Link.IPCacheKey { + key = key + ":" + args.IP + } + extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration)) + } + return link, nil + } + if storage.Config().OnlyLocal { + link, err := fn() + if err != nil { + return nil, nil, err + } + return link.Link, link.Obj, nil + } + link, err, _ := extractG.Do(key, fn) + if err != nil { + return nil, nil, err + } + return link.Link, link.Obj, err +} + +func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*extractLink, error) { + storageAr, ok := storage.(driver.ArchiveReader) + if !ok { + return nil, errs.DriverExtractNotSupported + } + archiveFile, extracted, err := ArchiveGet(ctx, storage, path, model.ArchiveListArgs{ + ArchiveInnerArgs: args, + Refresh: false, + }) + if err != nil { 
+ return nil, errors.WithMessage(err, "failed to get file") + } + if extracted.IsDir() { + return nil, errors.WithStack(errs.NotFile) + } + link, err := storageAr.Extract(ctx, archiveFile, args) + return &extractLink{Link: link, Obj: extracted}, err +} + +type streamWithParent struct { + rc io.ReadCloser + parent *stream.SeekableStream +} + +func (s *streamWithParent) Read(p []byte) (int, error) { + return s.rc.Read(p) +} + +func (s *streamWithParent) Close() error { + err1 := s.rc.Close() + err2 := s.parent.Close() + return stderrors.Join(err1, err2) +} + +func InternalExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + _, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs) + if err != nil { + return nil, 0, err + } + rc, size, err := t.Extract(ss, args) + if err != nil { + if e := ss.Close(); e != nil { + log.Errorf("failed to close file streamer, %v", e) + } + return nil, 0, err + } + return &streamWithParent{rc: rc, parent: ss}, size, nil +} + +func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) error { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + srcPath = utils.FixAndCleanPath(srcPath) + dstDirPath = utils.FixAndCleanPath(dstDirPath) + srcObj, err := GetUnwrap(ctx, storage, srcPath) + if err != nil { + return errors.WithMessage(err, "failed to get src object") + } + dstDir, err := GetUnwrap(ctx, storage, dstDirPath) + if err != nil { + return errors.WithMessage(err, "failed to get dst dir") + } + + switch s := storage.(type) { + case driver.ArchiveDecompressResult: + var newObjs []model.Obj + newObjs, err = s.ArchiveDecompress(ctx, srcObj, dstDir, args) + if err == nil { + if newObjs != nil && len(newObjs) > 0 { + for _, newObj := range newObjs { + 
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) + } + } else if !utils.IsBool(lazyCache...) { + ClearCache(storage, dstDirPath) + } + } + case driver.ArchiveDecompress: + err = s.ArchiveDecompress(ctx, srcObj, dstDir, args) + if err == nil && !utils.IsBool(lazyCache...) { + ClearCache(storage, dstDirPath) + } + default: + return errs.NotImplement + } + return errors.WithStack(err) +} diff --git a/internal/stream/stream.go b/internal/stream/stream.go index 2c9543c1..b19eb077 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "math" "os" "github.com/alist-org/alist/v3/internal/errs" @@ -60,6 +61,8 @@ func (f *FileStream) Close() error { err2 = os.RemoveAll(f.tmpFile.Name()) if err2 != nil { err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", f.tmpFile.Name()) + } else { + f.tmpFile = nil } } @@ -92,6 +95,26 @@ func (f *FileStream) CacheFullInTempFile() (model.File, error) { return f.tmpFile, nil } +func (f *FileStream) CacheFullInTempFileAndUpdateProgress(up model.UpdateProgress) (model.File, error) { + if f.tmpFile != nil { + return f.tmpFile, nil + } + if file, ok := f.Reader.(model.File); ok { + return file, nil + } + tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{ + Reader: f, + UpdateProgress: up, + }, f.GetSize()) + if err != nil { + return nil, err + } + f.Add(tmpF) + f.tmpFile = tmpF + f.Reader = tmpF + return f.tmpFile, nil +} + const InMemoryBufMaxSize = 10 // Megabytes const InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024 @@ -247,7 +270,202 @@ func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) { return ss.tmpFile, nil } +func (ss *SeekableStream) CacheFullInTempFileAndUpdateProgress(up model.UpdateProgress) (model.File, error) { + if ss.tmpFile != nil { + return ss.tmpFile, nil + } + if ss.mFile != nil { + return ss.mFile, nil + } + tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{ + Reader: ss, + UpdateProgress: up, + }, 
ss.GetSize()) + if err != nil { + return nil, err + } + ss.Add(tmpF) + ss.tmpFile = tmpF + ss.Reader = tmpF + return ss.tmpFile, nil +} + func (f *FileStream) SetTmpFile(r *os.File) { f.Reader = r f.tmpFile = r } + +type ReaderWithSize interface { + io.Reader + GetSize() int64 +} + +type SimpleReaderWithSize struct { + io.Reader + Size int64 +} + +func (r *SimpleReaderWithSize) GetSize() int64 { + return r.Size +} + +type ReaderUpdatingProgress struct { + Reader ReaderWithSize + model.UpdateProgress + offset int +} + +func (r *ReaderUpdatingProgress) Read(p []byte) (n int, err error) { + n, err = r.Reader.Read(p) + r.offset += n + r.UpdateProgress(math.Min(100.0, float64(r.offset)/float64(r.Reader.GetSize())*100.0)) + return n, err +} + +type SStreamReadAtSeeker interface { + model.File + GetRawStream() *SeekableStream +} + +type readerCur struct { + reader io.Reader + cur int64 +} + +type RangeReadReadAtSeeker struct { + ss *SeekableStream + masterOff int64 + readers []*readerCur +} + +type FileReadAtSeeker struct { + ss *SeekableStream +} + +func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStreamReadAtSeeker, error) { + if ss.mFile != nil { + _, err := ss.mFile.Seek(offset, io.SeekStart) + if err != nil { + return nil, err + } + return &FileReadAtSeeker{ss: ss}, nil + } + var r io.Reader + var err error + if offset != 0 || utils.IsBool(forceRange...) 
{ + if offset < 0 || offset > ss.GetSize() { + return nil, errors.New("offset out of range") + } + r, err = ss.RangeRead(http_range.Range{Start: offset, Length: -1}) + if err != nil { + return nil, err + } + if rc, ok := r.(io.Closer); ok { + ss.Closers.Add(rc) + } + } else { + r = ss + } + return &RangeReadReadAtSeeker{ + ss: ss, + masterOff: offset, + readers: []*readerCur{{reader: r, cur: offset}}, + }, nil +} + +func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream { + return r.ss +} + +func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error) { + for _, reader := range r.readers { + if reader.cur == off { + return reader, nil + } + } + reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: -1}) + if err != nil { + return nil, err + } + if c, ok := reader.(io.Closer); ok { + r.ss.Closers.Add(c) + } + rc := &readerCur{reader: reader, cur: off} + r.readers = append(r.readers, rc) + return rc, nil +} + +func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (int, error) { + rc, err := r.getReaderAtOffset(off) + if err != nil { + return 0, err + } + num := 0 + for num < len(p) { + n, err := rc.reader.Read(p[num:]) + rc.cur += int64(n) + num += n + if err != nil { + return num, err + } + } + return num, nil +} + +func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + case io.SeekCurrent: + if offset == 0 { + return r.masterOff, nil + } + offset += r.masterOff + case io.SeekEnd: + offset += r.ss.GetSize() + default: + return 0, errs.NotSupport + } + if offset < 0 { + return r.masterOff, errors.New("invalid seek: negative position") + } + if offset > r.ss.GetSize() { + return r.masterOff, io.EOF + } + r.masterOff = offset + return offset, nil +} + +func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) { + rc, err := r.getReaderAtOffset(r.masterOff) + if err != nil { + return 0, err + } + n, err = rc.reader.Read(p) + rc.cur += int64(n) + 
r.masterOff += int64(n) + return n, err +} + +func (r *RangeReadReadAtSeeker) Close() error { + return r.ss.Close() +} + +func (f *FileReadAtSeeker) GetRawStream() *SeekableStream { + return f.ss +} + +func (f *FileReadAtSeeker) Read(p []byte) (n int, err error) { + return f.ss.mFile.Read(p) +} + +func (f *FileReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) { + return f.ss.mFile.ReadAt(p, off) +} + +func (f *FileReadAtSeeker) Seek(offset int64, whence int) (int64, error) { + return f.ss.mFile.Seek(offset, whence) +} + +func (f *FileReadAtSeeker) Close() error { + return f.ss.Close() +} diff --git a/internal/task/manager.go b/internal/task/manager.go new file mode 100644 index 00000000..3caa685a --- /dev/null +++ b/internal/task/manager.go @@ -0,0 +1,20 @@ +package task + +import "github.com/xhofe/tache" + +type Manager[T tache.Task] interface { + Add(task T) + Cancel(id string) + CancelAll() + CancelByCondition(condition func(task T) bool) + GetAll() []T + GetByID(id string) (T, bool) + GetByState(state ...tache.State) []T + GetByCondition(condition func(task T) bool) []T + Remove(id string) + RemoveAll() + RemoveByState(state ...tache.State) + RemoveByCondition(condition func(task T) bool) + Retry(id string) + RetryAllFailed() +} diff --git a/server/ftp/fsread.go b/server/ftp/fsread.go index 257d2ec8..f7e018e0 100644 --- a/server/ftp/fsread.go +++ b/server/ftp/fsread.go @@ -8,10 +8,8 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/stream" - "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/server/common" "github.com/pkg/errors" - "io" fs2 "io/fs" "net/http" "os" @@ -20,9 +18,7 @@ import ( type FileDownloadProxy struct { ftpserver.FileTransfer - ss *stream.SeekableStream - reader io.Reader - cur int64 + reader stream.SStreamReadAtSeeker } func OpenDownload(ctx context.Context, reqPath string, offset int64) (*FileDownloadProxy, 
error) { @@ -55,22 +51,16 @@ func OpenDownload(ctx context.Context, reqPath string, offset int64) (*FileDownl if err != nil { return nil, err } - var reader io.Reader - if offset != 0 { - reader, err = ss.RangeRead(http_range.Range{Start: offset, Length: -1}) - if err != nil { - return nil, err - } - } else { - reader = ss + reader, err := stream.NewReadAtSeeker(ss, offset) + if err != nil { + _ = ss.Close() + return nil, err } - return &FileDownloadProxy{ss: ss, reader: reader}, nil + return &FileDownloadProxy{reader: reader}, nil } func (f *FileDownloadProxy) Read(p []byte) (n int, err error) { - n, err = f.reader.Read(p) - f.cur += int64(n) - return n, err + return f.reader.Read(p) } func (f *FileDownloadProxy) Write(p []byte) (n int, err error) { @@ -78,32 +68,11 @@ func (f *FileDownloadProxy) Write(p []byte) (n int, err error) { } func (f *FileDownloadProxy) Seek(offset int64, whence int) (int64, error) { - switch whence { - case io.SeekStart: - break - case io.SeekCurrent: - offset += f.cur - break - case io.SeekEnd: - offset += f.ss.GetSize() - break - default: - return 0, errs.NotSupport - } - if offset < 0 { - return 0, errors.New("Seek: negative position") - } - reader, err := f.ss.RangeRead(http_range.Range{Start: offset, Length: -1}) - if err != nil { - return f.cur, err - } - f.cur = offset - f.reader = reader - return offset, nil + return f.reader.Seek(offset, whence) } func (f *FileDownloadProxy) Close() error { - return f.ss.Close() + return f.reader.Close() } type OsFileInfoAdapter struct { diff --git a/server/handles/archive.go b/server/handles/archive.go new file mode 100644 index 00000000..29dbf3c2 --- /dev/null +++ b/server/handles/archive.go @@ -0,0 +1,381 @@ +package handles + +import ( + "fmt" + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" + 
"github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/internal/sign" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/server/common" + "github.com/gin-gonic/gin" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "mime" + stdpath "path" + "strings" +) + +type ArchiveMetaReq struct { + Path string `json:"path" form:"path"` + Password string `json:"password" form:"password"` + Refresh bool `json:"refresh" form:"refresh"` + ArchivePass string `json:"archive_pass" form:"archive_pass"` +} + +type ArchiveMetaResp struct { + Comment string `json:"comment"` + IsEncrypted bool `json:"encrypted"` + Content []ArchiveContentResp `json:"content"` + RawURL string `json:"raw_url"` + Sign string `json:"sign"` +} + +type ArchiveContentResp struct { + ObjResp + Children []ArchiveContentResp `json:"children,omitempty"` +} + +func toObjsRespWithoutSignAndThumb(obj model.Obj) ObjResp { + return ObjResp{ + Name: obj.GetName(), + Size: obj.GetSize(), + IsDir: obj.IsDir(), + Modified: obj.ModTime(), + Created: obj.CreateTime(), + HashInfoStr: obj.GetHash().String(), + HashInfo: obj.GetHash().Export(), + Sign: "", + Thumb: "", + Type: utils.GetObjType(obj.GetName(), obj.IsDir()), + } +} + +func toContentResp(objs []model.ObjTree) []ArchiveContentResp { + if objs == nil { + return nil + } + ret, _ := utils.SliceConvert(objs, func(src model.ObjTree) (ArchiveContentResp, error) { + return ArchiveContentResp{ + ObjResp: toObjsRespWithoutSignAndThumb(src), + Children: toContentResp(src.GetChildren()), + }, nil + }) + return ret +} + +func FsArchiveMeta(c *gin.Context) { + var req ArchiveMetaReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + user := c.MustGet("user").(*model.User) + if !user.CanReadArchives() { + common.ErrorResp(c, errs.PermissionDenied, 403) + return + } + reqPath, err := user.JoinPath(req.Path) + if err != nil { + 
common.ErrorResp(c, err, 403) + return + } + meta, err := op.GetNearestMeta(reqPath) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + common.ErrorResp(c, err, 500, true) + return + } + } + c.Set("meta", meta) + if !common.CanAccess(user, meta, reqPath, req.Password) { + common.ErrorStrResp(c, "password is incorrect or you have no permission", 403) + return + } + archiveArgs := model.ArchiveArgs{ + LinkArgs: model.LinkArgs{ + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + }, + Password: req.ArchivePass, + } + ret, err := fs.ArchiveMeta(c, reqPath, model.ArchiveMetaArgs{ + ArchiveArgs: archiveArgs, + Refresh: req.Refresh, + }) + if err != nil { + if errors.Is(err, errs.WrongArchivePassword) { + common.ErrorResp(c, err, 202) + } else { + common.ErrorResp(c, err, 500) + } + return + } + s := "" + if isEncrypt(meta, reqPath) || setting.GetBool(conf.SignAll) { + s = sign.Sign(reqPath) + } + api := "/ae" + if ret.DriverProviding { + api = "/ad" + } + common.SuccessResp(c, ArchiveMetaResp{ + Comment: ret.GetComment(), + IsEncrypted: ret.IsEncrypted(), + Content: toContentResp(ret.GetTree()), + RawURL: fmt.Sprintf("%s%s%s", common.GetApiUrl(c.Request), api, utils.EncodePath(reqPath, true)), + Sign: s, + }) +} + +type ArchiveListReq struct { + ArchiveMetaReq + model.PageReq + InnerPath string `json:"inner_path" form:"inner_path"` +} + +type ArchiveListResp struct { + Content []ObjResp `json:"content"` + Total int64 `json:"total"` +} + +func FsArchiveList(c *gin.Context) { + var req ArchiveListReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + req.Validate() + user := c.MustGet("user").(*model.User) + if !user.CanReadArchives() { + common.ErrorResp(c, errs.PermissionDenied, 403) + return + } + reqPath, err := user.JoinPath(req.Path) + if err != nil { + common.ErrorResp(c, err, 403) + return + } + meta, err := op.GetNearestMeta(reqPath) + if err != nil { + if 
!errors.Is(errors.Cause(err), errs.MetaNotFound) { + common.ErrorResp(c, err, 500, true) + return + } + } + c.Set("meta", meta) + if !common.CanAccess(user, meta, reqPath, req.Password) { + common.ErrorStrResp(c, "password is incorrect or you have no permission", 403) + return + } + objs, err := fs.ArchiveList(c, reqPath, model.ArchiveListArgs{ + ArchiveInnerArgs: model.ArchiveInnerArgs{ + ArchiveArgs: model.ArchiveArgs{ + LinkArgs: model.LinkArgs{ + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + }, + Password: req.ArchivePass, + }, + InnerPath: utils.FixAndCleanPath(req.InnerPath), + }, + Refresh: req.Refresh, + }) + if err != nil { + if errors.Is(err, errs.WrongArchivePassword) { + common.ErrorResp(c, err, 202) + } else { + common.ErrorResp(c, err, 500) + } + return + } + total, objs := pagination(objs, &req.PageReq) + ret, _ := utils.SliceConvert(objs, func(src model.Obj) (ObjResp, error) { + return toObjsRespWithoutSignAndThumb(src), nil + }) + common.SuccessResp(c, ArchiveListResp{ + Content: ret, + Total: int64(total), + }) +} + +type ArchiveDecompressReq struct { + SrcDir string `json:"src_dir" form:"src_dir"` + DstDir string `json:"dst_dir" form:"dst_dir"` + Name string `json:"name" form:"name"` + ArchivePass string `json:"archive_pass" form:"archive_pass"` + InnerPath string `json:"inner_path" form:"inner_path"` + CacheFull bool `json:"cache_full" form:"cache_full"` + PutIntoNewDir bool `json:"put_into_new_dir" form:"put_into_new_dir"` +} + +func FsArchiveDecompress(c *gin.Context) { + var req ArchiveDecompressReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + user := c.MustGet("user").(*model.User) + if !user.CanDecompress() { + common.ErrorResp(c, errs.PermissionDenied, 403) + return + } + srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, req.Name)) + if err != nil { + common.ErrorResp(c, err, 403) + return + } + dstDir, err := user.JoinPath(req.DstDir) + if err != nil { + 
common.ErrorResp(c, err, 403) + return + } + t, err := fs.ArchiveDecompress(c, srcPath, dstDir, model.ArchiveDecompressArgs{ + ArchiveInnerArgs: model.ArchiveInnerArgs{ + ArchiveArgs: model.ArchiveArgs{ + LinkArgs: model.LinkArgs{ + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + }, + Password: req.ArchivePass, + }, + InnerPath: utils.FixAndCleanPath(req.InnerPath), + }, + CacheFull: req.CacheFull, + PutIntoNewDir: req.PutIntoNewDir, + }) + if err != nil { + if errors.Is(err, errs.WrongArchivePassword) { + common.ErrorResp(c, err, 202) + } else { + common.ErrorResp(c, err, 500) + } + return + } + common.SuccessResp(c, gin.H{ + "task": getTaskInfo(t), + }) +} + +func ArchiveDown(c *gin.Context) { + archiveRawPath := c.MustGet("path").(string) + innerPath := utils.FixAndCleanPath(c.Query("inner")) + password := c.Query("pass") + filename := stdpath.Base(innerPath) + storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{}) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + if common.ShouldProxy(storage, filename) { + ArchiveProxy(c) + return + } else { + link, _, err := fs.ArchiveDriverExtract(c, archiveRawPath, model.ArchiveInnerArgs{ + ArchiveArgs: model.ArchiveArgs{ + LinkArgs: model.LinkArgs{ + IP: c.ClientIP(), + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + }, + Password: password, + }, + InnerPath: innerPath, + }) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + down(c, link) + } +} + +func ArchiveProxy(c *gin.Context) { + archiveRawPath := c.MustGet("path").(string) + innerPath := utils.FixAndCleanPath(c.Query("inner")) + password := c.Query("pass") + filename := stdpath.Base(innerPath) + storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{}) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + if canProxy(storage, filename) { + // TODO: Support external download proxy URL + link, file, err := fs.ArchiveDriverExtract(c, archiveRawPath, 
model.ArchiveInnerArgs{ + ArchiveArgs: model.ArchiveArgs{ + LinkArgs: model.LinkArgs{ + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + }, + Password: password, + }, + InnerPath: innerPath, + }) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + localProxy(c, link, file, storage.GetStorage().ProxyRange) + } else { + common.ErrorStrResp(c, "proxy not allowed", 403) + return + } +} + +func ArchiveInternalExtract(c *gin.Context) { + archiveRawPath := c.MustGet("path").(string) + innerPath := utils.FixAndCleanPath(c.Query("inner")) + password := c.Query("pass") + rc, size, err := fs.ArchiveInternalExtract(c, archiveRawPath, model.ArchiveInnerArgs{ + ArchiveArgs: model.ArchiveArgs{ + LinkArgs: model.LinkArgs{ + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + }, + Password: password, + }, + InnerPath: innerPath, + }) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + defer func() { + if err := rc.Close(); err != nil { + log.Errorf("failed to close file streamer, %v", err) + } + }() + headers := map[string]string{ + "Referrer-Policy": "no-referrer", + "Cache-Control": "max-age=0, no-cache, no-store, must-revalidate", + } + if c.Query("attachment") == "true" { + filename := stdpath.Base(innerPath) + headers["Content-Disposition"] = fmt.Sprintf("attachment; filename=\"%s\"", filename) + } + contentType := c.Request.Header.Get("Content-Type") + if contentType == "" { + fileExt := stdpath.Ext(innerPath) + contentType = mime.TypeByExtension(fileExt) + } + c.DataFromReader(200, size, contentType, rc, headers) +} + +func ArchiveExtensions(c *gin.Context) { + var ext []string + for key := range tool.Tools { + ext = append(ext, strings.TrimPrefix(key, ".")) + } + common.SuccessResp(c, ext) +} diff --git a/server/handles/down.go b/server/handles/down.go index 0020ed14..f01c9d66 100644 --- a/server/handles/down.go +++ b/server/handles/down.go @@ -40,28 +40,7 @@ func Down(c *gin.Context) { 
common.ErrorResp(c, err, 500) return } - if link.MFile != nil { - defer func(ReadSeekCloser io.ReadCloser) { - err := ReadSeekCloser.Close() - if err != nil { - log.Errorf("close data error: %s", err) - } - }(link.MFile) - } - c.Header("Referrer-Policy", "no-referrer") - c.Header("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate") - if setting.GetBool(conf.ForwardDirectLinkParams) { - query := c.Request.URL.Query() - for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] { - query.Del(v) - } - link.URL, err = utils.InjectQuery(link.URL, query) - if err != nil { - common.ErrorResp(c, err, 500) - return - } - } - c.Redirect(302, link.URL) + down(c, link) } } @@ -95,31 +74,62 @@ func Proxy(c *gin.Context) { common.ErrorResp(c, err, 500) return } - if link.URL != "" && setting.GetBool(conf.ForwardDirectLinkParams) { - query := c.Request.URL.Query() - for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] { - query.Del(v) - } - link.URL, err = utils.InjectQuery(link.URL, query) - if err != nil { - common.ErrorResp(c, err, 500) - return - } - } - if storage.GetStorage().ProxyRange { - common.ProxyRange(link, file.GetSize()) - } - err = common.Proxy(c.Writer, c.Request, link, file) - if err != nil { - common.ErrorResp(c, err, 500, true) - return - } + localProxy(c, link, file, storage.GetStorage().ProxyRange) } else { common.ErrorStrResp(c, "proxy not allowed", 403) return } } +func down(c *gin.Context, link *model.Link) { + var err error + if link.MFile != nil { + defer func(ReadSeekCloser io.ReadCloser) { + err := ReadSeekCloser.Close() + if err != nil { + log.Errorf("close data error: %s", err) + } + }(link.MFile) + } + c.Header("Referrer-Policy", "no-referrer") + c.Header("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate") + if setting.GetBool(conf.ForwardDirectLinkParams) { + query := c.Request.URL.Query() + for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] { + query.Del(v) + } + link.URL, err = 
utils.InjectQuery(link.URL, query) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + } + c.Redirect(302, link.URL) +} + +func localProxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) { + var err error + if link.URL != "" && setting.GetBool(conf.ForwardDirectLinkParams) { + query := c.Request.URL.Query() + for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] { + query.Del(v) + } + link.URL, err = utils.InjectQuery(link.URL, query) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + } + if proxyRange { + common.ProxyRange(link, file.GetSize()) + } + err = common.Proxy(c.Writer, c.Request, link, file) + if err != nil { + common.ErrorResp(c, err, 500, true) + return + } +} + // TODO need optimize // when can be proxy? // 1. text file diff --git a/server/handles/task.go b/server/handles/task.go index c7d9ef48..af7974a9 100644 --- a/server/handles/task.go +++ b/server/handles/task.go @@ -75,7 +75,7 @@ func getUserInfo(c *gin.Context) (bool, uint, bool) { } } -func getTargetedHandler[T task.TaskExtensionInfo](manager *tache.Manager[T], callback func(c *gin.Context, task T)) gin.HandlerFunc { +func getTargetedHandler[T task.TaskExtensionInfo](manager task.Manager[T], callback func(c *gin.Context, task T)) gin.HandlerFunc { return func(c *gin.Context) { isAdmin, uid, ok := getUserInfo(c) if !ok { @@ -97,7 +97,7 @@ func getTargetedHandler[T task.TaskExtensionInfo](manager *tache.Manager[T], cal } } -func getBatchHandler[T task.TaskExtensionInfo](manager *tache.Manager[T], callback func(task T)) gin.HandlerFunc { +func getBatchHandler[T task.TaskExtensionInfo](manager task.Manager[T], callback func(task T)) gin.HandlerFunc { return func(c *gin.Context) { isAdmin, uid, ok := getUserInfo(c) if !ok { @@ -122,7 +122,7 @@ func getBatchHandler[T task.TaskExtensionInfo](manager *tache.Manager[T], callba } } -func taskRoute[T task.TaskExtensionInfo](g *gin.RouterGroup, manager *tache.Manager[T]) { +func taskRoute[T 
task.TaskExtensionInfo](g *gin.RouterGroup, manager task.Manager[T]) { g.GET("/undone", func(c *gin.Context) { isAdmin, uid, ok := getUserInfo(c) if !ok { @@ -220,4 +220,6 @@ func SetupTaskRoute(g *gin.RouterGroup) { taskRoute(g.Group("/copy"), fs.CopyTaskManager) taskRoute(g.Group("/offline_download"), tool.DownloadTaskManager) taskRoute(g.Group("/offline_download_transfer"), tool.TransferTaskManager) + taskRoute(g.Group("/decompress"), fs.ArchiveDownloadTaskManager) + taskRoute(g.Group("/decompress_upload"), fs.ArchiveContentUploadTaskManager) } diff --git a/server/router.go b/server/router.go index 184de51e..63bad60f 100644 --- a/server/router.go +++ b/server/router.go @@ -42,6 +42,12 @@ func Init(e *gin.Engine) { g.GET("/p/*path", middlewares.Down, handles.Proxy) g.HEAD("/d/*path", middlewares.Down, handles.Down) g.HEAD("/p/*path", middlewares.Down, handles.Proxy) + g.GET("/ad/*path", middlewares.Down, handles.ArchiveDown) + g.GET("/ap/*path", middlewares.Down, handles.ArchiveProxy) + g.GET("/ae/*path", middlewares.Down, handles.ArchiveInternalExtract) + g.HEAD("/ad/*path", middlewares.Down, handles.ArchiveDown) + g.HEAD("/ap/*path", middlewares.Down, handles.ArchiveProxy) + g.HEAD("/ae/*path", middlewares.Down, handles.ArchiveInternalExtract) api := g.Group("/api") auth := api.Group("", middlewares.Auth) @@ -77,6 +83,7 @@ func Init(e *gin.Engine) { public := api.Group("/public") public.Any("/settings", handles.PublicSettings) public.Any("/offline_download_tools", handles.OfflineDownloadTools) + public.Any("/archive_extensions", handles.ArchiveExtensions) _fs(auth.Group("/fs")) _task(auth.Group("/task", middlewares.AuthNotGuest)) @@ -173,6 +180,10 @@ func _fs(g *gin.RouterGroup) { // g.POST("/add_qbit", handles.AddQbittorrent) // g.POST("/add_transmission", handles.SetTransmission) g.POST("/add_offline_download", handles.AddOfflineDownload) + a := g.Group("/archive") + a.Any("/meta", handles.FsArchiveMeta) + a.Any("/list", handles.FsArchiveList) + 
a.POST("/decompress", handles.FsArchiveDecompress) } func _task(g *gin.RouterGroup) { From 59e02287b2bbd7f36358305421a20f79a5322f85 Mon Sep 17 00:00:00 2001 From: Jealous Date: Sat, 18 Jan 2025 23:39:07 +0800 Subject: [PATCH 085/187] feat(fs): add `overwrite` option to preventing unintentional overwriting (#7809) --- server/handles/fsmanage.go | 37 ++++++++++++++++++++++++++++++++----- server/handles/fsup.go | 16 ++++++++++++++++ 2 files changed, 48 insertions(+), 5 deletions(-) diff --git a/server/handles/fsmanage.go b/server/handles/fsmanage.go index 9877b127..9349e7e2 100644 --- a/server/handles/fsmanage.go +++ b/server/handles/fsmanage.go @@ -56,9 +56,10 @@ func FsMkdir(c *gin.Context) { } type MoveCopyReq struct { - SrcDir string `json:"src_dir"` - DstDir string `json:"dst_dir"` - Names []string `json:"names"` + SrcDir string `json:"src_dir"` + DstDir string `json:"dst_dir"` + Names []string `json:"names"` + Overwrite bool `json:"overwrite"` } func FsMove(c *gin.Context) { @@ -86,6 +87,14 @@ func FsMove(c *gin.Context) { common.ErrorResp(c, err, 403) return } + if !req.Overwrite { + for _, name := range req.Names { + if res, _ := fs.Get(c, stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil { + common.ErrorStrResp(c, "file exists", 403) + return + } + } + } for i, name := range req.Names { err := fs.Move(c, stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1) if err != nil { @@ -121,6 +130,14 @@ func FsCopy(c *gin.Context) { common.ErrorResp(c, err, 403) return } + if !req.Overwrite { + for _, name := range req.Names { + if res, _ := fs.Get(c, stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil { + common.ErrorStrResp(c, "file exists", 403) + return + } + } + } var addedTasks []task.TaskExtensionInfo for i, name := range req.Names { t, err := fs.Copy(c, stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1) @@ -138,8 +155,9 @@ func FsCopy(c *gin.Context) { } type RenameReq struct { - Path string `json:"path"` - Name string 
`json:"name"` + Path string `json:"path"` + Name string `json:"name"` + Overwrite bool `json:"overwrite"` } func FsRename(c *gin.Context) { @@ -158,6 +176,15 @@ func FsRename(c *gin.Context) { common.ErrorResp(c, err, 403) return } + if !req.Overwrite { + dstPath := stdpath.Join(stdpath.Dir(reqPath), req.Name) + if dstPath != reqPath { + if res, _ := fs.Get(c, dstPath, &fs.GetArgs{NoLog: true}); res != nil { + common.ErrorStrResp(c, "file exists", 403) + return + } + } + } if err := fs.Rename(c, reqPath, req.Name); err != nil { common.ErrorResp(c, err, 500) return diff --git a/server/handles/fsup.go b/server/handles/fsup.go index a17c50f0..563afbcd 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -34,12 +34,20 @@ func FsStream(c *gin.Context) { return } asTask := c.GetHeader("As-Task") == "true" + overwrite := c.GetHeader("Overwrite") != "false" user := c.MustGet("user").(*model.User) path, err = user.JoinPath(path) if err != nil { common.ErrorResp(c, err, 403) return } + if !overwrite { + if res, _ := fs.Get(c, path, &fs.GetArgs{NoLog: true}); res != nil { + _, _ = io.Copy(io.Discard, c.Request.Body) + common.ErrorStrResp(c, "file exists", 403) + return + } + } dir, name := stdpath.Split(path) sizeStr := c.GetHeader("Content-Length") size, err := strconv.ParseInt(sizeStr, 10, 64) @@ -85,12 +93,20 @@ func FsForm(c *gin.Context) { return } asTask := c.GetHeader("As-Task") == "true" + overwrite := c.GetHeader("Overwrite") != "false" user := c.MustGet("user").(*model.User) path, err = user.JoinPath(path) if err != nil { common.ErrorResp(c, err, 403) return } + if !overwrite { + if res, _ := fs.Get(c, path, &fs.GetArgs{NoLog: true}); res != nil { + _, _ = io.Copy(io.Discard, c.Request.Body) + common.ErrorStrResp(c, "file exists", 403) + return + } + } storage, err := fs.GetStorage(path, &fs.GetStoragesArgs{}) if err != nil { common.ErrorResp(c, err, 400) From 11b6a6012f256facbeaf9314281321b05eeadef3 Mon Sep 17 00:00:00 2001 From: KirCute_ECT 
<951206789@qq.com> Date: Sat, 18 Jan 2025 23:52:02 +0800 Subject: [PATCH 086/187] fix(copy): use Link and Put when the driver does not support copying (#7834) --- internal/fs/copy.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/fs/copy.go b/internal/fs/copy.go index c3fadaab..977f7280 100644 --- a/internal/fs/copy.go +++ b/internal/fs/copy.go @@ -3,6 +3,7 @@ package fs import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/errs" "net/http" stdpath "path" "time" @@ -69,7 +70,10 @@ func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool } // copy if in the same storage, just call driver.Copy if srcStorage.GetStorage() == dstStorage.GetStorage() { - return nil, op.Copy(ctx, srcStorage, srcObjActualPath, dstDirActualPath, lazyCache...) + err = op.Copy(ctx, srcStorage, srcObjActualPath, dstDirActualPath, lazyCache...) + if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.NotSupport) { + return nil, err + } } if ctx.Value(conf.NoTaskKey) != nil { srcObj, err := op.Get(ctx, srcStorage, srcObjActualPath) From c2633dd4436a2337246979a9d866f7e408e6c2ea Mon Sep 17 00:00:00 2001 From: Jealous Date: Thu, 23 Jan 2025 22:49:35 +0800 Subject: [PATCH 087/187] fix(workflow): use the dev version of the web for beta releases (#7862) * fix(workflow): use dev version of the web for beta releases * chore(config): check version string by prefix --- .github/workflows/release_docker.yml | 7 ++++++- build.sh | 11 +++++++++-- internal/bootstrap/config.go | 2 +- internal/bootstrap/patch.go | 3 ++- 4 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release_docker.yml b/.github/workflows/release_docker.yml index f4c79baf..7cd05549 100644 --- a/.github/workflows/release_docker.yml +++ b/.github/workflows/release_docker.yml @@ -52,7 +52,12 @@ jobs: if: steps.cache-musl.outputs.cache-hit != 'true' run: bash build.sh prepare docker-multiplatform - - name: Build go binary + - name: Build go 
binary (beta) + if: env.IMAGE_IS_PROD != 'true' + run: bash build.sh beta docker-multiplatform + + - name: Build go binary (release) + if: env.IMAGE_IS_PROD == 'true' run: bash build.sh release docker-multiplatform - name: Upload artifacts diff --git a/build.sh b/build.sh index a87eabf4..d6e001c2 100644 --- a/build.sh +++ b/build.sh @@ -7,6 +7,9 @@ gitCommit=$(git log --pretty=format:"%h" -1) if [ "$1" = "dev" ]; then version="dev" webVersion="dev" +elif [ "$1" = "beta" ]; then + version="beta" + webVersion="dev" else git tag -d beta version=$(git describe --abbrev=0 --tags) @@ -301,8 +304,12 @@ if [ "$1" = "dev" ]; then else BuildDev fi -elif [ "$1" = "release" ]; then - FetchWebRelease +elif [ "$1" = "release" -o "$1" = "beta" ]; then + if [ "$1" = "beta" ]; then + FetchWebDev + else + FetchWebRelease + fi if [ "$2" = "docker" ]; then BuildDocker elif [ "$2" = "docker-multiplatform" ]; then diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go index a44c7350..38b1aa9e 100644 --- a/internal/bootstrap/config.go +++ b/internal/bootstrap/config.go @@ -50,7 +50,7 @@ func InitConfig() { log.Fatalf("load config error: %+v", err) } LastLaunchedVersion = conf.Conf.LastLaunchedVersion - if conf.Version != "dev" || LastLaunchedVersion == "" { + if strings.HasPrefix(conf.Version, "v") || LastLaunchedVersion == "" { conf.Conf.LastLaunchedVersion = conf.Version } // update config.json struct diff --git a/internal/bootstrap/patch.go b/internal/bootstrap/patch.go index 8dc3ed02..2d22d1b6 100644 --- a/internal/bootstrap/patch.go +++ b/internal/bootstrap/patch.go @@ -5,6 +5,7 @@ import ( "github.com/alist-org/alist/v3/internal/bootstrap/patch" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/pkg/utils" + "strings" ) var LastLaunchedVersion = "" @@ -38,7 +39,7 @@ func compareVersion(majorA, minorA, patchNumA, majorB, minorB, patchNumB int) bo } func InitUpgradePatch() { - if conf.Version == "dev" { + if !strings.HasPrefix(conf.Version, 
"v") { return } if LastLaunchedVersion == conf.Version { From bdcf450203b70c05d748c605fbf9df9c47c98b2c Mon Sep 17 00:00:00 2001 From: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Mon, 27 Jan 2025 20:06:18 +0800 Subject: [PATCH 088/187] fix: resolve concurrent read/write issues in WrapObjName (#7865) --- internal/model/obj.go | 4 ++-- internal/model/object.go | 3 --- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/internal/model/obj.go b/internal/model/obj.go index 2a72ca9e..552b1241 100644 --- a/internal/model/obj.go +++ b/internal/model/obj.go @@ -115,12 +115,12 @@ func ExtractFolder(objs []Obj, extractFolder string) { } func WrapObjName(objs Obj) Obj { - return &ObjWrapName{Obj: objs} + return &ObjWrapName{Name: utils.MappingName(objs.GetName()), Obj: objs} } func WrapObjsName(objs []Obj) { for i := 0; i < len(objs); i++ { - objs[i] = &ObjWrapName{Obj: objs[i]} + objs[i] = &ObjWrapName{Name: utils.MappingName(objs[i].GetName()), Obj: objs[i]} } } diff --git a/internal/model/object.go b/internal/model/object.go index 93f2c307..c8c10bb9 100644 --- a/internal/model/object.go +++ b/internal/model/object.go @@ -16,9 +16,6 @@ func (o *ObjWrapName) Unwrap() Obj { } func (o *ObjWrapName) GetName() string { - if o.Name == "" { - o.Name = utils.MappingName(o.Obj.GetName()) - } return o.Name } From 2be0c3d1a088d2c74bb429c9d6072a73bd30fb1b Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Mon, 27 Jan 2025 20:08:39 +0800 Subject: [PATCH 089/187] feat(alias): add `DownloadConcurrency` and `DownloadPartSize` option (#7829) * fix(net): goroutine logic bug (AlistGo/alist#7215) * Fix goroutine logic bug * Fix bug --------- Co-authored-by: hpy hs * perf(net): sequential and dynamic concurrency * fix(net): incorrect error return * feat(alias): add `DownloadConcurrency` and `DownloadPartSize` option * feat(net): add `ConcurrencyLimit` * pref(net): create `chunk` on demand * refactor * refactor * fix(net): 
`r.Closers.Add` has no effect * refactor --------- Co-authored-by: hpy hs --- drivers/alias/driver.go | 10 + drivers/alias/meta.go | 6 +- drivers/alias/util.go | 10 +- drivers/crypt/driver.go | 7 +- drivers/github/driver.go | 15 +- drivers/halalcloud/driver.go | 16 +- drivers/mega/driver.go | 4 +- drivers/netease_music/types.go | 1 - drivers/netease_music/upload.go | 2 +- drivers/quqi/util.go | 4 +- internal/bootstrap/config.go | 4 + internal/conf/config.go | 2 + internal/model/args.go | 11 +- internal/net/request.go | 364 +++++++++++++----- internal/net/serve.go | 35 +- internal/net/util.go | 3 +- .../offline_download/transmission/client.go | 4 +- internal/stream/stream.go | 5 +- internal/stream/util.go | 44 +-- server/common/proxy.go | 15 +- server/handles/archive.go | 9 +- server/handles/down.go | 9 +- server/s3/backend.go | 52 +-- server/webdav/webdav.go | 2 +- 24 files changed, 396 insertions(+), 238 deletions(-) diff --git a/drivers/alias/driver.go b/drivers/alias/driver.go index 1b439a2c..16215c8e 100644 --- a/drivers/alias/driver.go +++ b/drivers/alias/driver.go @@ -110,6 +110,16 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( for _, dst := range dsts { link, err := d.link(ctx, dst, sub, args) if err == nil { + if !args.Redirect && len(link.URL) > 0 { + // 正常情况下 多并发 仅支持返回URL的驱动 + // alias套娃alias 可以让crypt、mega等驱动(不返回URL的) 支持并发 + if d.DownloadConcurrency > 0 { + link.Concurrency = d.DownloadConcurrency + } + if d.DownloadPartSize > 0 { + link.PartSize = d.DownloadPartSize * utils.KB + } + } return link, nil } } diff --git a/drivers/alias/meta.go b/drivers/alias/meta.go index 45b88575..ed657a5d 100644 --- a/drivers/alias/meta.go +++ b/drivers/alias/meta.go @@ -9,8 +9,10 @@ type Addition struct { // Usually one of two // driver.RootPath // define other - Paths string `json:"paths" required:"true" type:"text"` - ProtectSameName bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from 
Delete or Rename"` + Paths string `json:"paths" required:"true" type:"text"` + ProtectSameName bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"` + DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"` + DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"` } var config = driver.Config{ diff --git a/drivers/alias/util.go b/drivers/alias/util.go index c0e9081b..ee17b622 100644 --- a/drivers/alias/util.go +++ b/drivers/alias/util.go @@ -9,6 +9,7 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" @@ -94,10 +95,15 @@ func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([ func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) (*model.Link, error) { reqPath := stdpath.Join(dst, sub) - storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{}) + // 参考 crypt 驱动 + storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath) if err != nil { return nil, err } + if _, ok := storage.(*Alias); !ok && !args.Redirect { + link, _, err := op.Link(ctx, storage, reqActualPath, args) + return link, err + } _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true}) if err != nil { return nil, err @@ -114,7 +120,7 @@ func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) } return link, nil } - link, _, err := fs.Link(ctx, reqPath, args) + link, _, err := op.Link(ctx, storage, reqActualPath, args) return link, err } diff --git a/drivers/crypt/driver.go b/drivers/crypt/driver.go index b6115896..e6f253d1 100644 --- 
a/drivers/crypt/driver.go +++ b/drivers/crypt/driver.go @@ -275,7 +275,6 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( rrc = converted } if rrc != nil { - //remoteRangeReader, err := remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length}) remoteClosers.AddClosers(rrc.GetClosers()) if err != nil { @@ -288,10 +287,8 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( if err != nil { return nil, err } - //remoteClosers.Add(remoteLink.MFile) - //keep reuse same MFile and close at last. - remoteClosers.Add(remoteLink.MFile) - return io.NopCloser(remoteLink.MFile), nil + // 可以直接返回,读取完也不会调用Close,直到连接断开Close + return remoteLink.MFile, nil } return nil, errs.NotSupport diff --git a/drivers/github/driver.go b/drivers/github/driver.go index ea8f6276..eed06882 100644 --- a/drivers/github/driver.go +++ b/drivers/github/driver.go @@ -5,6 +5,13 @@ import ( "encoding/base64" "errors" "fmt" + "io" + "net/http" + stdpath "path" + "strings" + "sync" + "text/template" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -12,12 +19,6 @@ import ( "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" log "github.com/sirupsen/logrus" - "io" - "net/http" - stdpath "path" - "strings" - "sync" - "text/template" ) type Github struct { @@ -656,7 +657,7 @@ func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driv contentReader, contentWriter := io.Pipe() go func() { encoder := base64.NewEncoder(base64.StdEncoding, contentWriter) - if _, err := io.Copy(encoder, stream); err != nil { + if _, err := utils.CopyWithBuffer(encoder, stream); err != nil { _ = contentWriter.CloseWithError(err) return } diff --git a/drivers/halalcloud/driver.go b/drivers/halalcloud/driver.go index 08bb3808..d3235828 100644 --- a/drivers/halalcloud/driver.go +++ 
b/drivers/halalcloud/driver.go @@ -4,12 +4,17 @@ import ( "context" "crypto/sha1" "fmt" + "io" + "net/url" + "path" + "strconv" + "time" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/http_range" - "github.com/alist-org/alist/v3/pkg/utils" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" @@ -19,11 +24,6 @@ import ( pubUserFile "github.com/city404/v6-public-rpc-proto/go/v6/userfile" "github.com/rclone/rclone/lib/readers" "github.com/zzzhr1990/go-common-entity/userfile" - "io" - "net/url" - "path" - "strconv" - "time" ) type HalalCloud struct { @@ -251,7 +251,6 @@ func (d *HalalCloud) getLink(ctx context.Context, file model.Obj, args model.Lin size := result.FileSize chunks := getChunkSizes(result.Sizes) - var finalClosers utils.Closers resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { length := httpRange.Length if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size { @@ -269,7 +268,6 @@ func (d *HalalCloud) getLink(ctx context.Context, file model.Obj, args model.Lin sha: result.Sha1, shaTemp: sha1.New(), } - finalClosers.Add(oo) return readers.NewLimitedReadCloser(oo, length), nil } @@ -281,7 +279,7 @@ func (d *HalalCloud) getLink(ctx context.Context, file model.Obj, args model.Lin duration = time.Until(time.Now().Add(time.Hour)) } - resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers} + resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader} return &model.Link{ RangeReadCloser: resultRangeReadCloser, Expiration: &duration, diff --git a/drivers/mega/driver.go b/drivers/mega/driver.go index 162aeef3..198c1f98 100644 --- a/drivers/mega/driver.go +++ b/drivers/mega/driver.go @@ -84,7 +84,6 @@ func 
(d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (* //} size := file.GetSize() - var finalClosers utils.Closers resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { length := httpRange.Length if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size { @@ -103,11 +102,10 @@ func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (* d: down, skip: httpRange.Start, } - finalClosers.Add(oo) return readers.NewLimitedReadCloser(oo, length), nil } - resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers} + resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader} resultLink := &model.Link{ RangeReadCloser: resultRangeReadCloser, } diff --git a/drivers/netease_music/types.go b/drivers/netease_music/types.go index edbd40ee..0e156ad1 100644 --- a/drivers/netease_music/types.go +++ b/drivers/netease_music/types.go @@ -64,7 +64,6 @@ func (lrc *LyricObj) getLyricLink() *model.Link { sr := io.NewSectionReader(reader, httpRange.Start, httpRange.Length) return io.NopCloser(sr), nil }, - Closers: utils.EmptyClosers(), }, } } diff --git a/drivers/netease_music/upload.go b/drivers/netease_music/upload.go index ece496b3..7f580bd1 100644 --- a/drivers/netease_music/upload.go +++ b/drivers/netease_music/upload.go @@ -47,7 +47,7 @@ func (u *uploader) init(stream model.FileStreamer) error { } h := md5.New() - io.Copy(h, stream) + utils.CopyWithBuffer(h, stream) u.md5 = hex.EncodeToString(h.Sum(nil)) _, err := u.file.Seek(0, io.SeekStart) if err != nil { diff --git a/drivers/quqi/util.go b/drivers/quqi/util.go index c025f6ee..c57e641b 100644 --- a/drivers/quqi/util.go +++ b/drivers/quqi/util.go @@ -300,9 +300,7 @@ func (d *Quqi) linkFromCDN(id string) (*model.Link, error) { bufferReader := bufio.NewReader(decryptReader) bufferReader.Discard(int(decryptedOffset)) - return utils.NewReadCloser(bufferReader, func() error { 
- return nil - }), nil + return io.NopCloser(bufferReader), nil } return &model.Link{ diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go index 38b1aa9e..db3e2094 100644 --- a/internal/bootstrap/config.go +++ b/internal/bootstrap/config.go @@ -9,6 +9,7 @@ import ( "github.com/alist-org/alist/v3/cmd/flags" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/net" "github.com/alist-org/alist/v3/pkg/utils" "github.com/caarlos0/env/v9" log "github.com/sirupsen/logrus" @@ -63,6 +64,9 @@ func InitConfig() { log.Fatalf("update config struct error: %+v", err) } } + if conf.Conf.MaxConcurrency > 0 { + net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency} + } if !conf.Conf.Force { confFromEnv() } diff --git a/internal/conf/config.go b/internal/conf/config.go index 4f5c2ae0..39b23227 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -106,6 +106,7 @@ type Config struct { Log LogConfig `json:"log"` DelayedStart int `json:"delayed_start" env:"DELAYED_START"` MaxConnections int `json:"max_connections" env:"MAX_CONNECTIONS"` + MaxConcurrency int `json:"max_concurrency" env:"MAX_CONCURRENCY"` TlsInsecureSkipVerify bool `json:"tls_insecure_skip_verify" env:"TLS_INSECURE_SKIP_VERIFY"` Tasks TasksConfig `json:"tasks" envPrefix:"TASKS_"` Cors Cors `json:"cors" envPrefix:"CORS_"` @@ -151,6 +152,7 @@ func DefaultConfig() *Config { MaxAge: 28, }, MaxConnections: 0, + MaxConcurrency: 64, TlsInsecureSkipVerify: true, Tasks: TasksConfig{ Download: TaskConfig{ diff --git a/internal/model/args.go b/internal/model/args.go index a9feeb20..f29c7e45 100644 --- a/internal/model/args.go +++ b/internal/model/args.go @@ -17,10 +17,11 @@ type ListArgs struct { } type LinkArgs struct { - IP string - Header http.Header - Type string - HttpReq *http.Request + IP string + Header http.Header + Type string + HttpReq *http.Request + Redirect bool } type Link 
struct { @@ -87,7 +88,7 @@ type RangeReadCloser struct { utils.Closers } -func (r RangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { +func (r *RangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { rc, err := r.RangeReader(ctx, httpRange) r.Closers.Add(rc) return rc, err diff --git a/internal/net/request.go b/internal/net/request.go index 1a7405e4..d2f3028f 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "math" "net/http" "strconv" "strings" @@ -21,7 +20,7 @@ import ( // DefaultDownloadPartSize is the default range of bytes to get at a time when // using Download(). -const DefaultDownloadPartSize = 1024 * 1024 * 10 +const DefaultDownloadPartSize = utils.MB * 10 // DefaultDownloadConcurrency is the default number of goroutines to spin up // when using Download(). @@ -30,6 +29,8 @@ const DefaultDownloadConcurrency = 2 // DefaultPartBodyMaxRetries is the default number of retries to make when a part fails to download. 
const DefaultPartBodyMaxRetries = 3 +var DefaultConcurrencyLimit *ConcurrencyLimit + type Downloader struct { PartSize int @@ -44,15 +45,15 @@ type Downloader struct { //RequestParam HttpRequestParams HttpClient HttpRequestFunc + + *ConcurrencyLimit } type HttpRequestFunc func(ctx context.Context, params *HttpRequestParams) (*http.Response, error) func NewDownloader(options ...func(*Downloader)) *Downloader { - d := &Downloader{ - HttpClient: DefaultHttpRequestFunc, - PartSize: DefaultDownloadPartSize, + d := &Downloader{ //允许不设置的选项 PartBodyMaxRetries: DefaultPartBodyMaxRetries, - Concurrency: DefaultDownloadConcurrency, + ConcurrencyLimit: DefaultConcurrencyLimit, } for _, option := range options { option(d) @@ -74,16 +75,16 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo impl := downloader{params: &finalP, cfg: d, ctx: ctx} // Ensures we don't need nil checks later on - - impl.partBodyMaxRetries = d.PartBodyMaxRetries - + // 必需的选项 if impl.cfg.Concurrency == 0 { impl.cfg.Concurrency = DefaultDownloadConcurrency } - if impl.cfg.PartSize == 0 { impl.cfg.PartSize = DefaultDownloadPartSize } + if impl.cfg.HttpClient == nil { + impl.cfg.HttpClient = DefaultHttpRequestFunc + } return impl.download() } @@ -91,7 +92,7 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo // downloader is the implementation structure used internally by Downloader. 
type downloader struct { ctx context.Context - cancel context.CancelFunc + cancel context.CancelCauseFunc cfg Downloader params *HttpRequestParams //http request params @@ -101,38 +102,78 @@ type downloader struct { m sync.Mutex nextChunk int //next chunk id - chunks []chunk bufs []*Buf - //totalBytes int64 - written int64 //total bytes of file downloaded from remote - err error + written int64 //total bytes of file downloaded from remote + err error - partBodyMaxRetries int + concurrency int //剩余的并发数,递减。到0时停止并发 + maxPart int //有多少个分片 + pos int64 + maxPos int64 + m2 sync.Mutex + readingID int // 正在被读取的id +} + +type ConcurrencyLimit struct { + _m sync.Mutex + Limit int // 需要大于0 +} + +var ErrExceedMaxConcurrency = fmt.Errorf("ExceedMaxConcurrency") + +func (l *ConcurrencyLimit) sub() error { + l._m.Lock() + defer l._m.Unlock() + if l.Limit-1 < 0 { + return ErrExceedMaxConcurrency + } + l.Limit-- + // log.Debugf("ConcurrencyLimit.sub: %d", l.Limit) + return nil +} +func (l *ConcurrencyLimit) add() { + l._m.Lock() + defer l._m.Unlock() + l.Limit++ + // log.Debugf("ConcurrencyLimit.add: %d", l.Limit) +} + +// 检测是否超过限制 +func (d *downloader) concurrencyCheck() error { + if d.cfg.ConcurrencyLimit != nil { + return d.cfg.ConcurrencyLimit.sub() + } + return nil +} +func (d *downloader) concurrencyFinish() { + if d.cfg.ConcurrencyLimit != nil { + d.cfg.ConcurrencyLimit.add() + } } // download performs the implementation of the object download across ranged GETs. 
func (d *downloader) download() (io.ReadCloser, error) { - d.ctx, d.cancel = context.WithCancel(d.ctx) + if err := d.concurrencyCheck(); err != nil { + return nil, err + } + d.ctx, d.cancel = context.WithCancelCause(d.ctx) - pos := d.params.Range.Start - maxPos := d.params.Range.Start + d.params.Range.Length - id := 0 - for pos < maxPos { - finalSize := int64(d.cfg.PartSize) - //check boundary - if pos+finalSize > maxPos { - finalSize = maxPos - pos - } - c := chunk{start: pos, size: finalSize, id: id} - d.chunks = append(d.chunks, c) - pos += finalSize - id++ + maxPart := int(d.params.Range.Length / int64(d.cfg.PartSize)) + if d.params.Range.Length%int64(d.cfg.PartSize) > 0 { + maxPart++ } - if len(d.chunks) < d.cfg.Concurrency { - d.cfg.Concurrency = len(d.chunks) + if maxPart < d.cfg.Concurrency { + d.cfg.Concurrency = maxPart } + log.Debugf("cfgConcurrency:%d", d.cfg.Concurrency) if d.cfg.Concurrency == 1 { + if d.cfg.ConcurrencyLimit != nil { + go func() { + <-d.ctx.Done() + d.concurrencyFinish() + }() + } resp, err := d.cfg.HttpClient(d.ctx, d.params) if err != nil { return nil, err @@ -143,61 +184,114 @@ func (d *downloader) download() (io.ReadCloser, error) { // workers d.chunkChannel = make(chan chunk, d.cfg.Concurrency) - for i := 0; i < d.cfg.Concurrency; i++ { - buf := NewBuf(d.ctx, d.cfg.PartSize, i) - d.bufs = append(d.bufs, buf) - go d.downloadPart() - } - // initial tasks - for i := 0; i < d.cfg.Concurrency; i++ { - d.sendChunkTask() - } + d.maxPart = maxPart + d.pos = d.params.Range.Start + d.maxPos = d.params.Range.Start + d.params.Range.Length + d.concurrency = d.cfg.Concurrency + d.sendChunkTask(true) - var rc io.ReadCloser = NewMultiReadCloser(d.chunks[0].buf, d.interrupt, d.finishBuf) + var rc io.ReadCloser = NewMultiReadCloser(d.bufs[0], d.interrupt, d.finishBuf) // Return error return rc, d.err } -func (d *downloader) sendChunkTask() *chunk { - ch := &d.chunks[d.nextChunk] - ch.buf = d.getBuf(d.nextChunk) - ch.buf.Reset(int(ch.size)) - 
d.chunkChannel <- *ch - d.nextChunk++ - return ch + +func (d *downloader) sendChunkTask(newConcurrency bool) error { + d.m.Lock() + defer d.m.Unlock() + isNewBuf := d.concurrency > 0 + if newConcurrency { + if d.concurrency <= 0 { + return nil + } + if d.nextChunk > 0 { // 第一个不检查,因为已经检查过了 + if err := d.concurrencyCheck(); err != nil { + return err + } + } + d.concurrency-- + go d.downloadPart() + } + + var buf *Buf + if isNewBuf { + buf = NewBuf(d.ctx, d.cfg.PartSize) + d.bufs = append(d.bufs, buf) + } else { + buf = d.getBuf(d.nextChunk) + } + + if d.pos < d.maxPos { + finalSize := int64(d.cfg.PartSize) + switch d.nextChunk { + case 0: + // 最小分片在前面有助视频播放? + firstSize := d.params.Range.Length % finalSize + if firstSize > 0 { + minSize := finalSize / 2 + if firstSize < minSize { // 最小分片太小就调整到一半 + finalSize = minSize + } else { + finalSize = firstSize + } + } + case 1: + firstSize := d.params.Range.Length % finalSize + minSize := finalSize / 2 + if firstSize > 0 && firstSize < minSize { + finalSize += firstSize - minSize + } + } + buf.Reset(int(finalSize)) + ch := chunk{ + start: d.pos, + size: finalSize, + id: d.nextChunk, + buf: buf, + } + ch.newConcurrency = newConcurrency + d.pos += finalSize + d.nextChunk++ + d.chunkChannel <- ch + return nil + } + return nil } // when the final reader Close, we interrupt func (d *downloader) interrupt() error { - - d.cancel() if d.written != d.params.Range.Length { log.Debugf("Downloader interrupt before finish") if d.getErr() == nil { d.setErr(fmt.Errorf("interrupted")) } } + d.cancel(d.err) defer func() { close(d.chunkChannel) for _, buf := range d.bufs { buf.Close() } + if d.concurrency > 0 { + d.concurrency = -d.concurrency + } + log.Debugf("maxConcurrency:%d", d.cfg.Concurrency+d.concurrency) }() return d.err } func (d *downloader) getBuf(id int) (b *Buf) { - - return d.bufs[id%d.cfg.Concurrency] + return d.bufs[id%len(d.bufs)] } -func (d *downloader) finishBuf(id int) (isLast bool, buf *Buf) { - if id >= len(d.chunks)-1 { 
+func (d *downloader) finishBuf(id int) (isLast bool, nextBuf *Buf) { + id++ + if id >= d.maxPart { return true, nil } - if d.nextChunk > id+1 { - return false, d.getBuf(id + 1) - } - ch := d.sendChunkTask() - return false, ch.buf + + d.sendChunkTask(false) + + d.readingID = id + return false, d.getBuf(id) } // downloadPart is an individual goroutine worker reading from the ch channel @@ -212,58 +306,119 @@ func (d *downloader) downloadPart() { if d.getErr() != nil { // Drain the channel if there is an error, to prevent deadlocking // of download producer. - continue + break } - log.Debugf("downloadPart tried to get chunk") if err := d.downloadChunk(&c); err != nil { + if err == errCancelConcurrency { + break + } + if err == context.Canceled { + if e := context.Cause(d.ctx); e != nil { + err = e + } + } d.setErr(err) + d.cancel(err) } } + d.concurrencyFinish() } // downloadChunk downloads the chunk func (d *downloader) downloadChunk(ch *chunk) error { - log.Debugf("start new chunk %+v buffer_id =%d", ch, ch.id) + log.Debugf("start chunk_%d, %+v", ch.id, ch) + params := d.getParamsFromChunk(ch) var n int64 var err error - params := d.getParamsFromChunk(ch) - for retry := 0; retry <= d.partBodyMaxRetries; retry++ { + for retry := 0; retry <= d.cfg.PartBodyMaxRetries; retry++ { if d.getErr() != nil { - return d.getErr() + return nil } n, err = d.tryDownloadChunk(params, ch) if err == nil { + d.incrWritten(n) + log.Debugf("chunk_%d downloaded", ch.id) break } - // Check if the returned error is an errReadingBody. - // If err is errReadingBody this indicates that an error - // occurred while copying the http response body. + if d.getErr() != nil { + return nil + } + if utils.IsCanceled(d.ctx) { + return d.ctx.Err() + } + // Check if the returned error is an errNeedRetry. // If this occurs we unwrap the err to set the underlying error // and attempt any remaining retries. 
- if bodyErr, ok := err.(*errReadingBody); ok { - err = bodyErr.Unwrap() + if e, ok := err.(*errNeedRetry); ok { + err = e.Unwrap() + if n > 0 { + // 测试:下载时 断开 alist向云盘发起的下载连接 + // 校验:下载完后校验文件哈希值 一致 + d.incrWritten(n) + ch.start += n + ch.size -= n + params.Range.Start = ch.start + params.Range.Length = ch.size + } + log.Warnf("err chunk_%d, object part download error %s, retrying attempt %d. %v", + ch.id, params.URL, retry, err) + } else if err == errInfiniteRetry { + retry-- + continue } else { - return err + break } - - //ch.cur = 0 - - log.Debugf("object part body download interrupted %s, err, %v, retrying attempt %d", - params.URL, err, retry) } - d.incrWritten(n) - log.Debugf("down_%d downloaded chunk", ch.id) - //ch.buf.buffer.wg1.Wait() - //log.Debugf("down_%d downloaded chunk,wg wait passed", ch.id) return err } -func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) { +var errCancelConcurrency = fmt.Errorf("cancel concurrency") +var errInfiniteRetry = fmt.Errorf("infinite retry") +func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) { resp, err := d.cfg.HttpClient(d.ctx, params) if err != nil { - return 0, err + if resp == nil { + return 0, err + } + if ch.id == 0 { //第1个任务 有限的重试,超过重试就会结束请求 + switch resp.StatusCode { + default: + return 0, err + case http.StatusTooManyRequests: + case http.StatusBadGateway: + case http.StatusServiceUnavailable: + case http.StatusGatewayTimeout: + } + <-time.After(time.Millisecond * 200) + return 0, &errNeedRetry{err: fmt.Errorf("http request failure,status: %d", resp.StatusCode)} + } + + // 来到这 说明第1个分片下载 连接成功了 + // 后续分片下载出错都当超载处理 + log.Debugf("err chunk_%d, try downloading:%v", ch.id, err) + + d.m.Lock() + isCancelConcurrency := ch.newConcurrency + if d.concurrency > 0 { // 取消剩余的并发任务 + // 用于计算实际的并发数 + d.concurrency = -d.concurrency + isCancelConcurrency = true + } + if isCancelConcurrency { + d.concurrency-- + d.chunkChannel <- *ch + d.m.Unlock() + 
return 0, errCancelConcurrency + } + d.m.Unlock() + if ch.id != d.readingID { //正在被读取的优先重试 + d.m2.Lock() + defer d.m2.Unlock() + <-time.After(time.Millisecond * 200) + } + return 0, errInfiniteRetry } defer resp.Body.Close() //only check file size on the first task @@ -273,15 +428,15 @@ func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int return 0, err } } - + d.sendChunkTask(true) n, err := utils.CopyWithBuffer(ch.buf, resp.Body) if err != nil { - return n, &errReadingBody{err: err} + return n, &errNeedRetry{err: err} } if n != ch.size { err = fmt.Errorf("chunk download size incorrect, expected=%d, got=%d", ch.size, n) - return n, &errReadingBody{err: err} + return n, &errNeedRetry{err: err} } return n, nil @@ -297,7 +452,7 @@ func (d *downloader) getParamsFromChunk(ch *chunk) *HttpRequestParams { func (d *downloader) checkTotalBytes(resp *http.Response) error { var err error - var totalBytes int64 = math.MinInt64 + totalBytes := int64(-1) contentRange := resp.Header.Get("Content-Range") if len(contentRange) == 0 { // ContentRange is nil when the full file contents is provided, and @@ -329,8 +484,9 @@ func (d *downloader) checkTotalBytes(resp *http.Response) error { err = fmt.Errorf("expect file size=%d unmatch remote report size=%d, need refresh cache", d.params.Size, totalBytes) } if err != nil { - _ = d.interrupt() + // _ = d.interrupt() d.setErr(err) + d.cancel(err) } return err @@ -369,9 +525,7 @@ type chunk struct { buf *Buf id int - // Downloader takes range (start,length), but this chunk is requesting equal/sub range of it. 
- // To convert the writer to reader eventually, we need to write within the boundary - //boundary http_range.Range + newConcurrency bool } func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*http.Response, error) { @@ -379,7 +533,7 @@ func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*ht res, err := RequestHttp(ctx, "GET", header, params.URL) if err != nil { - return nil, err + return res, err } return res, nil } @@ -392,15 +546,15 @@ type HttpRequestParams struct { //total file size Size int64 } -type errReadingBody struct { +type errNeedRetry struct { err error } -func (e *errReadingBody) Error() string { - return fmt.Sprintf("failed to read part body: %v", e.err) +func (e *errNeedRetry) Error() string { + return e.err.Error() } -func (e *errReadingBody) Unwrap() error { +func (e *errNeedRetry) Unwrap() error { return e.err } @@ -438,9 +592,13 @@ func (mr MultiReadCloser) Read(p []byte) (n int, err error) { } mr.cfg.curBuf = next mr.cfg.rPos++ - //current.Close() return n, nil } + if err == context.Canceled { + if e := context.Cause(mr.cfg.curBuf.ctx); e != nil { + err = e + } + } return n, err } func (mr MultiReadCloser) Close() error { @@ -453,18 +611,16 @@ type Buf struct { ctx context.Context off int rw sync.Mutex - //notify chan struct{} } // NewBuf is a buffer that can have 1 read & 1 write at the same time. 
// when read is faster write, immediately feed data to read after written -func NewBuf(ctx context.Context, maxSize int, id int) *Buf { +func NewBuf(ctx context.Context, maxSize int) *Buf { d := make([]byte, 0, maxSize) return &Buf{ ctx: ctx, buffer: bytes.NewBuffer(d), size: maxSize, - //notify: make(chan struct{}), } } func (br *Buf) Reset(size int) { @@ -502,8 +658,6 @@ func (br *Buf) Read(p []byte) (n int, err error) { select { case <-br.ctx.Done(): return 0, br.ctx.Err() - //case <-br.notify: - // return 0, nil case <-time.After(time.Millisecond * 200): return 0, nil } @@ -516,13 +670,9 @@ func (br *Buf) Write(p []byte) (n int, err error) { br.rw.Lock() defer br.rw.Unlock() n, err = br.buffer.Write(p) - select { - //case br.notify <- struct{}{}: - default: - } return } func (br *Buf) Close() { - //close(br.notify) + br.buffer.Reset() } diff --git a/internal/net/serve.go b/internal/net/serve.go index e85f61a8..6216cd21 100644 --- a/internal/net/serve.go +++ b/internal/net/serve.go @@ -52,7 +52,8 @@ import ( // // If the caller has set w's ETag header formatted per RFC 7232, section 2.3, // ServeHTTP uses it to handle requests using If-Match, If-None-Match, or If-Range. -func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time.Time, size int64, RangeReaderFunc model.RangeReaderFunc) { +func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time.Time, size int64, RangeReadCloser model.RangeReadCloserIF) { + defer RangeReadCloser.Close() setLastModified(w, modTime) done, rangeReq := checkPreconditions(w, r, modTime) if done { @@ -110,11 +111,19 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time // or unknown file size, ignore the range request. 
ranges = nil } + + // 使用请求的Context + // 不然从sendContent读不到数据,即使请求断开CopyBuffer也会一直堵塞 + ctx := r.Context() switch { case len(ranges) == 0: - reader, err := RangeReaderFunc(context.Background(), http_range.Range{Length: -1}) + reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1}) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + code = http.StatusRequestedRangeNotSatisfiable + if err == ErrExceedMaxConcurrency { + code = http.StatusTooManyRequests + } + http.Error(w, err.Error(), code) return } sendContent = reader @@ -131,9 +140,13 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time // does not request multiple parts might not support // multipart responses." ra := ranges[0] - sendContent, err = RangeReaderFunc(context.Background(), ra) + sendContent, err = RangeReadCloser.RangeRead(ctx, ra) if err != nil { - http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) + code = http.StatusRequestedRangeNotSatisfiable + if err == ErrExceedMaxConcurrency { + code = http.StatusTooManyRequests + } + http.Error(w, err.Error(), code) return } sendSize = ra.Length @@ -158,7 +171,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time pw.CloseWithError(err) return } - reader, err := RangeReaderFunc(context.Background(), ra) + reader, err := RangeReadCloser.RangeRead(ctx, ra) if err != nil { pw.CloseWithError(err) return @@ -167,14 +180,12 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time pw.CloseWithError(err) return } - //defer reader.Close() } mw.Close() pw.Close() }() } - //defer sendContent.Close() w.Header().Set("Accept-Ranges", "bytes") if w.Header().Get("Content-Encoding") == "" { @@ -190,7 +201,11 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time if written != sendSize { log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish. 
written bytes: %d ,sendSize:%d, ", written, sendSize) } - http.Error(w, err.Error(), http.StatusInternalServerError) + code = http.StatusInternalServerError + if err == ErrExceedMaxConcurrency { + code = http.StatusTooManyRequests + } + http.Error(w, err.Error(), code) } } } @@ -239,7 +254,7 @@ func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Hea _ = res.Body.Close() msg := string(all) log.Debugln(msg) - return nil, fmt.Errorf("http request [%s] failure,status: %d response:%s", URL, res.StatusCode, msg) + return res, fmt.Errorf("http request [%s] failure,status: %d response:%s", URL, res.StatusCode, msg) } return res, nil } diff --git a/internal/net/util.go b/internal/net/util.go index 44201859..45301dde 100644 --- a/internal/net/util.go +++ b/internal/net/util.go @@ -2,7 +2,6 @@ package net import ( "fmt" - "github.com/alist-org/alist/v3/pkg/utils" "io" "math" "mime/multipart" @@ -11,6 +10,8 @@ import ( "strings" "time" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/pkg/http_range" log "github.com/sirupsen/logrus" ) diff --git a/internal/offline_download/transmission/client.go b/internal/offline_download/transmission/client.go index 8049afd6..ae136009 100644 --- a/internal/offline_download/transmission/client.go +++ b/internal/offline_download/transmission/client.go @@ -5,7 +5,6 @@ import ( "context" "encoding/base64" "fmt" - "io" "net/http" "net/url" "strconv" @@ -15,6 +14,7 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/offline_download/tool" "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/pkg/utils" "github.com/hekmon/transmissionrpc/v3" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -92,7 +92,7 @@ func (t *Transmission) AddURL(args *tool.AddUrlArgs) (string, error) { buffer := new(bytes.Buffer) encoder := base64.NewEncoder(base64.StdEncoding, buffer) // Stream file to the encoder - if _, err = 
io.Copy(encoder, resp.Body); err != nil { + if _, err = utils.CopyWithBuffer(encoder, resp.Body); err != nil { return "", errors.Wrap(err, "can't copy file content into the base64 encoder") } // Flush last bytes diff --git a/internal/stream/stream.go b/internal/stream/stream.go index b19eb077..0915ee6b 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -122,7 +122,8 @@ const InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024 // also support a peeking RangeRead at very start, but won't buffer more than 10MB data in memory func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { if httpRange.Length == -1 { - httpRange.Length = f.GetSize() + // 参考 internal/net/request.go + httpRange.Length = f.GetSize() - httpRange.Start } if f.peekBuff != nil && httpRange.Start < int64(f.peekBuff.Len()) && httpRange.Start+httpRange.Length-1 < int64(f.peekBuff.Len()) { return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil @@ -210,7 +211,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) // RangeRead is not thread-safe, pls use it in single thread only. 
func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { if httpRange.Length == -1 { - httpRange.Length = ss.GetSize() + httpRange.Length = ss.GetSize() - httpRange.Start } if ss.mFile != nil { return io.NewSectionReader(ss.mFile, httpRange.Start, httpRange.Length), nil diff --git a/internal/stream/util.go b/internal/stream/util.go index 7d2b7ef7..16854c38 100644 --- a/internal/stream/util.go +++ b/internal/stream/util.go @@ -6,7 +6,6 @@ import ( "io" "net/http" - "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/net" "github.com/alist-org/alist/v3/pkg/http_range" @@ -17,7 +16,6 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl if len(link.URL) == 0 { return nil, fmt.Errorf("can't create RangeReadCloser since URL is empty in link") } - //remoteClosers := utils.EmptyClosers() rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) { if link.Concurrency != 0 || link.PartSize != 0 { header := net.ProcessHeader(http.Header{}, link.Header) @@ -32,37 +30,29 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl HeaderRef: header, } rc, err := down.Download(ctx, req) - if err != nil { - return nil, errs.NewErr(err, "GetReadCloserFromLink failed") - } - return rc, nil + return rc, err } - if len(link.URL) > 0 { - response, err := RequestRangedHttp(ctx, link, r.Start, r.Length) - if err != nil { - if response == nil { - return nil, fmt.Errorf("http request failure, err:%s", err) - } - return nil, fmt.Errorf("http request failure,status: %d err:%s", response.StatusCode, err) + response, err := RequestRangedHttp(ctx, link, r.Start, r.Length) + if err != nil { + if response == nil { + return nil, fmt.Errorf("http request failure, err:%s", err) } - if r.Start == 0 && (r.Length == -1 || r.Length == size) || response.StatusCode == http.StatusPartialContent || - 
checkContentRange(&response.Header, r.Start) { - return response.Body, nil - } else if response.StatusCode == http.StatusOK { - log.Warnf("remote http server not supporting range request, expect low perfromace!") - readCloser, err := net.GetRangedHttpReader(response.Body, r.Start, r.Length) - if err != nil { - return nil, err - } - return readCloser, nil - - } - + return nil, err + } + if r.Start == 0 && (r.Length == -1 || r.Length == size) || response.StatusCode == http.StatusPartialContent || + checkContentRange(&response.Header, r.Start) { return response.Body, nil + } else if response.StatusCode == http.StatusOK { + log.Warnf("remote http server not supporting range request, expect low perfromace!") + readCloser, err := net.GetRangedHttpReader(response.Body, r.Start, r.Length) + if err != nil { + return nil, err + } + return readCloser, nil } - return nil, errs.NotSupport + return response.Body, nil } resultRangeReadCloser := model.RangeReadCloser{RangeReader: rangeReaderFunc} return &resultRangeReadCloser, nil diff --git a/server/common/proxy.go b/server/common/proxy.go index 10923613..2d828efd 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -27,16 +27,11 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. 
return nil } else if link.RangeReadCloser != nil { attachFileName(w, file) - net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser.RangeRead) - defer func() { - _ = link.RangeReadCloser.Close() - }() + net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser) return nil } else if link.Concurrency != 0 || link.PartSize != 0 { attachFileName(w, file) size := file.GetSize() - //var finalClosers model.Closers - finalClosers := utils.EmptyClosers() header := net.ProcessHeader(r.Header, link.Header) rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { down := net.NewDownloader(func(d *net.Downloader) { @@ -50,16 +45,14 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. HeaderRef: header, } rc, err := down.Download(ctx, req) - finalClosers.Add(rc) return rc, err } - net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), rangeReader) - defer finalClosers.Close() + net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &model.RangeReadCloser{RangeReader: rangeReader}) return nil } else { //transparent proxy header := net.ProcessHeader(r.Header, link.Header) - res, err := net.RequestHttp(context.Background(), r.Method, header, link.URL) + res, err := net.RequestHttp(r.Context(), r.Method, header, link.URL) if err != nil { return err } @@ -72,7 +65,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. 
if r.Method == http.MethodHead { return nil } - _, err = io.Copy(w, res.Body) + _, err = utils.CopyWithBuffer(w, res.Body) if err != nil { return err } diff --git a/server/handles/archive.go b/server/handles/archive.go index 29dbf3c2..bad99bac 100644 --- a/server/handles/archive.go +++ b/server/handles/archive.go @@ -281,10 +281,11 @@ func ArchiveDown(c *gin.Context) { link, _, err := fs.ArchiveDriverExtract(c, archiveRawPath, model.ArchiveInnerArgs{ ArchiveArgs: model.ArchiveArgs{ LinkArgs: model.LinkArgs{ - IP: c.ClientIP(), - Header: c.Request.Header, - Type: c.Query("type"), - HttpReq: c.Request, + IP: c.ClientIP(), + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + Redirect: true, }, Password: password, }, diff --git a/server/handles/down.go b/server/handles/down.go index f01c9d66..b2f9a21b 100644 --- a/server/handles/down.go +++ b/server/handles/down.go @@ -31,10 +31,11 @@ func Down(c *gin.Context) { return } else { link, _, err := fs.Link(c, rawPath, model.LinkArgs{ - IP: c.ClientIP(), - Header: c.Request.Header, - Type: c.Query("type"), - HttpReq: c.Request, + IP: c.ClientIP(), + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + Redirect: true, }) if err != nil { common.ErrorResp(c, err, 500) diff --git a/server/s3/backend.go b/server/s3/backend.go index e0cfd967..bca45008 100644 --- a/server/s3/backend.go +++ b/server/s3/backend.go @@ -6,13 +6,14 @@ import ( "context" "encoding/hex" "fmt" - "github.com/pkg/errors" "io" "path" "strings" "sync" "time" + "github.com/pkg/errors" + "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/model" @@ -173,15 +174,27 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string if link.RangeReadCloser == nil && link.MFile == nil && len(link.URL) == 0 { return nil, fmt.Errorf("the remote storage driver need to be enhanced to support s3") } - remoteFileSize := 
file.GetSize() - remoteClosers := utils.EmptyClosers() - rangeReaderFunc := func(ctx context.Context, start, length int64) (io.ReadCloser, error) { + + var rdr io.ReadCloser + length := int64(-1) + start := int64(0) + if rnge != nil { + start, length = rnge.Start, rnge.Length + } + // 参考 server/common/proxy.go + if link.MFile != nil { + _, err := link.MFile.Seek(start, io.SeekStart) + if err != nil { + return nil, err + } + rdr = link.MFile + } else { + remoteFileSize := file.GetSize() if length >= 0 && start+length >= remoteFileSize { length = -1 } rrc := link.RangeReadCloser if len(link.URL) > 0 { - rangedRemoteLink := &model.Link{ URL: link.URL, Header: link.Header, @@ -194,35 +207,12 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string } if rrc != nil { remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: start, Length: length}) - remoteClosers.AddClosers(rrc.GetClosers()) if err != nil { return nil, err } - return remoteReader, nil - } - if link.MFile != nil { - _, err := link.MFile.Seek(start, io.SeekStart) - if err != nil { - return nil, err - } - //remoteClosers.Add(remoteLink.MFile) - //keep reuse same MFile and close at last. 
- remoteClosers.Add(link.MFile) - return io.NopCloser(link.MFile), nil - } - return nil, errs.NotSupport - } - - var rdr io.ReadCloser - if rnge != nil { - rdr, err = rangeReaderFunc(ctx, rnge.Start, rnge.Length) - if err != nil { - return nil, err - } - } else { - rdr, err = rangeReaderFunc(ctx, 0, -1) - if err != nil { - return nil, err + rdr = utils.ReadCloser{Reader: remoteReader, Closer: rrc} + } else { + return nil, errs.NotSupport } } diff --git a/server/webdav/webdav.go b/server/webdav/webdav.go index b84e65b0..6585056b 100644 --- a/server/webdav/webdav.go +++ b/server/webdav/webdav.go @@ -263,7 +263,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta w.Header().Set("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate") http.Redirect(w, r, u, http.StatusFound) } else { - link, _, err := fs.Link(ctx, reqPath, model.LinkArgs{IP: utils.ClientIP(r), Header: r.Header, HttpReq: r}) + link, _, err := fs.Link(ctx, reqPath, model.LinkArgs{IP: utils.ClientIP(r), Header: r.Header, HttpReq: r, Redirect: true}) if err != nil { return http.StatusInternalServerError, err } From 5c5d8378e5650fe6b81807956d450f700ba7acae Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Mon, 27 Jan 2025 20:08:56 +0800 Subject: [PATCH 090/187] fix(archive): unable to preview (#7843) * fix(archive): unrecognition zip * feat(archive): add tree for zip meta * fix bug * refactor(archive): meta cache time use Link Expiration first * feat(archive): return sort policy in meta (#2) * refactor * perf(archive): reduce new network requests --------- Co-authored-by: KirCute_ECT <951206789@qq.com> --- internal/archive/archives/archives.go | 31 ++++-- internal/archive/archives/utils.go | 12 ++- internal/archive/zip/zip.go | 102 +++++++++++++++--- internal/model/archive.go | 4 + internal/op/archive.go | 43 ++++++-- internal/stream/stream.go | 144 +++++++++++++++++++++----- pkg/utils/io.go | 6 ++ 
server/handles/archive.go | 18 ++-- 8 files changed, 287 insertions(+), 73 deletions(-) diff --git a/internal/archive/archives/archives.go b/internal/archive/archives/archives.go index b70ba95b..6d48624f 100644 --- a/internal/archive/archives/archives.go +++ b/internal/archive/archives/archives.go @@ -1,42 +1,53 @@ package archives import ( - "github.com/alist-org/alist/v3/internal/archive/tool" - "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/stream" - "github.com/alist-org/alist/v3/pkg/utils" "io" "io/fs" "os" stdpath "path" "strings" + + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/pkg/utils" ) type Archives struct { } -func (_ *Archives) AcceptedExtensions() []string { +func (*Archives) AcceptedExtensions() []string { return []string{ ".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar", ".rar", ".7z", } } -func (_ *Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { +func (*Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { fsys, err := getFs(ss, args) if err != nil { return nil, err } - _, err = fsys.ReadDir(".") + files, err := fsys.ReadDir(".") if err != nil { return nil, filterPassword(err) } + + tree := make([]model.ObjTree, 0, len(files)) + for _, file := range files { + info, err := file.Info() + if err != nil { + continue + } + tree = append(tree, &model.ObjectTree{Object: *toModelObj(info)}) + } return &model.ArchiveMetaInfo{ Comment: "", Encrypted: false, + Tree: tree, }, nil } -func (_ *Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { +func (*Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { fsys, err := getFs(ss, args.ArchiveArgs) if err != nil { return nil, err 
@@ -58,7 +69,7 @@ func (_ *Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) }) } -func (_ *Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { +func (*Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { fsys, err := getFs(ss, args.ArchiveArgs) if err != nil { return nil, 0, err @@ -74,7 +85,7 @@ func (_ *Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArg return file, stat.Size(), nil } -func (_ *Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { +func (*Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { fsys, err := getFs(ss, args.ArchiveArgs) if err != nil { return err diff --git a/internal/archive/archives/utils.go b/internal/archive/archives/utils.go index b72e6bc6..fdae1009 100644 --- a/internal/archive/archives/utils.go +++ b/internal/archive/archives/utils.go @@ -1,15 +1,16 @@ package archives import ( - "github.com/alist-org/alist/v3/internal/errs" - "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/stream" - "github.com/mholt/archives" "io" fs2 "io/fs" "os" stdpath "path" "strings" + + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/mholt/archives" ) func getFs(ss *stream.SeekableStream, args model.ArchiveArgs) (*archives.ArchiveFS, error) { @@ -17,6 +18,9 @@ func getFs(ss *stream.SeekableStream, args model.ArchiveArgs) (*archives.Archive if err != nil { return nil, err } + if r, ok := reader.(*stream.RangeReadReadAtSeeker); ok { + r.InitHeadCache() + } format, _, err := archives.Identify(ss.Ctx, ss.GetName(), reader) if err != nil { return nil, errs.UnknownArchiveFormat diff --git 
a/internal/archive/zip/zip.go b/internal/archive/zip/zip.go index ccb70e65..e5285518 100644 --- a/internal/archive/zip/zip.go +++ b/internal/archive/zip/zip.go @@ -1,25 +1,26 @@ package zip import ( + "io" + "os" + stdpath "path" + "strings" + "github.com/alist-org/alist/v3/internal/archive/tool" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/stream" "github.com/yeka/zip" - "io" - "os" - stdpath "path" - "strings" ) type Zip struct { } -func (_ *Zip) AcceptedExtensions() []string { +func (*Zip) AcceptedExtensions() []string { return []string{".zip"} } -func (_ *Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { +func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { reader, err := stream.NewReadAtSeeker(ss, 0) if err != nil { return nil, err @@ -29,19 +30,81 @@ func (_ *Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model. 
return nil, err } encrypted := false + dirMap := make(map[string]*model.ObjectTree) + dirMap["."] = &model.ObjectTree{} for _, file := range zipReader.File { if file.IsEncrypted() { encrypted = true break } + + name := strings.TrimPrefix(decodeName(file.Name), "/") + var dir string + var dirObj *model.ObjectTree + isNewFolder := false + if !file.FileInfo().IsDir() { + // 先将 文件 添加到 所在的文件夹 + dir = stdpath.Dir(name) + dirObj = dirMap[dir] + if dirObj == nil { + isNewFolder = true + dirObj = &model.ObjectTree{} + dirObj.IsFolder = true + dirObj.Name = stdpath.Base(dir) + dirObj.Modified = file.ModTime() + dirMap[dir] = dirObj + } + dirObj.Children = append( + dirObj.Children, &model.ObjectTree{ + Object: *toModelObj(file.FileInfo()), + }, + ) + } else { + dir = strings.TrimSuffix(name, "/") + dirObj = dirMap[dir] + if dirObj == nil { + isNewFolder = true + dirObj = &model.ObjectTree{} + dirMap[dir] = dirObj + } + dirObj.IsFolder = true + dirObj.Name = stdpath.Base(dir) + dirObj.Modified = file.ModTime() + } + if isNewFolder { + // 将 文件夹 添加到 父文件夹 + dir = stdpath.Dir(dir) + pDirObj := dirMap[dir] + if pDirObj != nil { + pDirObj.Children = append(pDirObj.Children, dirObj) + continue + } + + for { + // 考虑压缩包仅记录文件的路径,不记录文件夹 + pDirObj = &model.ObjectTree{} + pDirObj.IsFolder = true + pDirObj.Name = stdpath.Base(dir) + pDirObj.Modified = file.ModTime() + dirMap[dir] = pDirObj + pDirObj.Children = append(pDirObj.Children, dirObj) + dir = stdpath.Dir(dir) + if dirMap[dir] != nil { + break + } + dirObj = pDirObj + } + } } + return &model.ArchiveMetaInfo{ Comment: zipReader.Comment, Encrypted: encrypted, + Tree: dirMap["."].GetChildren(), }, nil } -func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { +func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { reader, err := stream.NewReadAtSeeker(ss, 0) if err != nil { return nil, err @@ -53,6 +116,7 @@ func (_ *Zip) List(ss *stream.SeekableStream, 
args model.ArchiveInnerArgs) ([]mo if args.InnerPath == "/" { ret := make([]model.Obj, 0) passVerified := false + var dir *model.Object for _, file := range zipReader.File { if !passVerified && file.IsEncrypted() { file.SetPassword(args.Password) @@ -63,12 +127,24 @@ func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mo _ = rc.Close() passVerified = true } - name := decodeName(file.Name) - if strings.Contains(strings.TrimSuffix(name, "/"), "/") { + name := strings.TrimSuffix(decodeName(file.Name), "/") + if strings.Contains(name, "/") { + // 有些压缩包不压缩第一个文件夹 + strs := strings.Split(name, "/") + if dir == nil && len(strs) == 2 { + dir = &model.Object{ + Name: strs[0], + Modified: ss.ModTime(), + IsFolder: true, + } + } continue } ret = append(ret, toModelObj(file.FileInfo())) } + if len(ret) == 0 && dir != nil { + ret = append(ret, dir) + } return ret, nil } else { innerPath := strings.TrimPrefix(args.InnerPath, "/") + "/" @@ -76,13 +152,11 @@ func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mo exist := false for _, file := range zipReader.File { name := decodeName(file.Name) - if name == innerPath { - exist = true - } dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/" if dir != innerPath { continue } + exist = true ret = append(ret, toModelObj(file.FileInfo())) } if !exist { @@ -92,7 +166,7 @@ func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mo } } -func (_ *Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { +func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { reader, err := stream.NewReadAtSeeker(ss, 0) if err != nil { return nil, 0, err @@ -117,7 +191,7 @@ func (_ *Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (i return nil, 0, errs.ObjectNotFound } -func (_ *Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, 
up model.UpdateProgress) error { +func (*Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { reader, err := stream.NewReadAtSeeker(ss, 0) if err != nil { return err diff --git a/internal/model/archive.go b/internal/model/archive.go index 03ac7c36..01b83691 100644 --- a/internal/model/archive.go +++ b/internal/model/archive.go @@ -1,5 +1,7 @@ package model +import "time" + type ObjTree interface { Obj GetChildren() []ObjTree @@ -45,5 +47,7 @@ func (m *ArchiveMetaInfo) GetTree() []ObjTree { type ArchiveMetaProvider struct { ArchiveMeta + *Sort DriverProviding bool + Expiration *time.Duration } diff --git a/internal/op/archive.go b/internal/op/archive.go index 6a9fa084..a241838c 100644 --- a/internal/op/archive.go +++ b/internal/op/archive.go @@ -3,13 +3,14 @@ package op import ( "context" stderrors "errors" - "github.com/alist-org/alist/v3/internal/archive/tool" - "github.com/alist-org/alist/v3/internal/stream" "io" stdpath "path" "strings" "time" + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/Xhofe/go-cache" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -40,8 +41,8 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg if err != nil { return nil, errors.Wrapf(err, "failed to get %s archive met: %+v", path, err) } - if !storage.Config().NoCache { - archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) + if m.Expiration != nil { + archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](*m.Expiration)) } return m, nil } @@ -82,7 +83,15 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg } meta, err := storageAr.GetArchiveMeta(ctx, obj, args.ArchiveArgs) if !errors.Is(err, errs.NotImplement) { - return obj, 
&model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true}, err + archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true} + if meta.GetTree() != nil { + archiveMetaProvider.Sort = &storage.GetStorage().Sort + } + if !storage.Config().NoCache { + Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration) + archiveMetaProvider.Expiration = &Expiration + } + return obj, archiveMetaProvider, err } } obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs) @@ -95,7 +104,21 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg } }() meta, err := t.GetMeta(ss, args.ArchiveArgs) - return obj, &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: false}, err + if err != nil { + return nil, nil, err + } + archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: false} + if meta.GetTree() != nil { + archiveMetaProvider.Sort = &storage.GetStorage().Sort + } + if !storage.Config().NoCache { + Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration) + archiveMetaProvider.Expiration = &Expiration + } else if ss.Link.MFile == nil { + // alias、crypt 驱动 + archiveMetaProvider.Expiration = ss.Link.Expiration + } + return obj, archiveMetaProvider, err } var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64)) @@ -113,10 +136,10 @@ func ListArchive(ctx context.Context, storage driver.Driver, path string, args m log.Debugf("use cache when list archive [%s]%s", path, args.InnerPath) return files, nil } - if meta, ok := archiveMetaCache.Get(metaKey); ok { - log.Debugf("use meta cache when list archive [%s]%s", path, args.InnerPath) - return getChildrenFromArchiveMeta(meta, args.InnerPath) - } + // if meta, ok := archiveMetaCache.Get(metaKey); ok { + // log.Debugf("use meta cache when list archive [%s]%s", path, args.InnerPath) + // return getChildrenFromArchiveMeta(meta, args.InnerPath) + // } } 
objs, err, _ := archiveListG.Do(key, func() ([]model.Obj, error) { obj, files, err := listArchive(ctx, storage, path, args) diff --git a/internal/stream/stream.go b/internal/stream/stream.go index 0915ee6b..1962fb46 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -13,6 +13,7 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" + "github.com/sirupsen/logrus" ) type FileStream struct { @@ -189,6 +190,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) if ss.Link.RangeReadCloser != nil { ss.rangeReadCloser = ss.Link.RangeReadCloser + ss.Add(ss.rangeReadCloser) return &ss, nil } if len(ss.Link.URL) > 0 { @@ -197,6 +199,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) return nil, err } ss.rangeReadCloser = rrc + ss.Add(rrc) return &ss, nil } } @@ -248,8 +251,6 @@ func (ss *SeekableStream) Read(p []byte) (n int, err error) { return 0, nil } ss.Reader = io.NopCloser(rc) - ss.Closers.Add(rc) - } return ss.Reader.Read(p) } @@ -337,10 +338,62 @@ type RangeReadReadAtSeeker struct { ss *SeekableStream masterOff int64 readers []*readerCur + *headCache } -type FileReadAtSeeker struct { - ss *SeekableStream +type headCache struct { + *readerCur + bufs [][]byte +} + +func (c *headCache) read(p []byte) (n int, err error) { + pL := len(p) + logrus.Debugf("headCache read_%d", pL) + if c.cur < int64(pL) { + bufL := int64(pL) - c.cur + buf := make([]byte, bufL) + lr := io.LimitReader(c.reader, bufL) + off := 0 + for c.cur < int64(pL) { + n, err = lr.Read(buf[off:]) + off += n + c.cur += int64(n) + if err == io.EOF && n == int(bufL) { + err = nil + } + if err != nil { + break + } + } + c.bufs = append(c.bufs, buf) + } + n = 0 + if c.cur >= int64(pL) { + for i := 0; n < pL; i++ { + buf := c.bufs[i] + r := len(buf) + if n+r > pL { + r = pL - n + } + n += copy(p[n:], buf[:r]) + } + } + return +} +func 
(r *headCache) close() error { + for i := range r.bufs { + r.bufs[i] = nil + } + r.bufs = nil + return nil +} + +func (r *RangeReadReadAtSeeker) InitHeadCache() { + if r.ss.Link.MFile == nil && r.masterOff == 0 { + reader := r.readers[0] + r.readers = r.readers[1:] + r.headCache = &headCache{readerCur: reader} + } } func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStreamReadAtSeeker, error) { @@ -351,27 +404,23 @@ func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStr } return &FileReadAtSeeker{ss: ss}, nil } - var r io.Reader - var err error + r := &RangeReadReadAtSeeker{ + ss: ss, + masterOff: offset, + } if offset != 0 || utils.IsBool(forceRange...) { if offset < 0 || offset > ss.GetSize() { return nil, errors.New("offset out of range") } - r, err = ss.RangeRead(http_range.Range{Start: offset, Length: -1}) + _, err := r.getReaderAtOffset(offset) if err != nil { return nil, err } - if rc, ok := r.(io.Closer); ok { - ss.Closers.Add(rc) - } } else { - r = ss + rc := &readerCur{reader: ss, cur: offset} + r.readers = append(r.readers, rc) } - return &RangeReadReadAtSeeker{ - ss: ss, - masterOff: offset, - readers: []*readerCur{{reader: r, cur: offset}}, - }, nil + return r, nil } func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream { @@ -379,38 +428,71 @@ func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream { } func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error) { + var rc *readerCur for _, reader := range r.readers { + if reader.cur == -1 { + continue + } if reader.cur == off { + // logrus.Debugf("getReaderAtOffset match_%d", off) return reader, nil } + if reader.cur > 0 && off >= reader.cur && (rc == nil || reader.cur < rc.cur) { + rc = reader + } } - reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: -1}) + if rc != nil && off-rc.cur <= utils.MB { + n, err := utils.CopyWithBufferN(utils.NullWriter{}, rc.reader, off-rc.cur) + rc.cur += n + if err 
== io.EOF && rc.cur == off { + err = nil + } + if err == nil { + logrus.Debugf("getReaderAtOffset old_%d", off) + return rc, nil + } + rc.cur = -1 + } + logrus.Debugf("getReaderAtOffset new_%d", off) + + // Range请求不能超过文件大小,有些云盘处理不了就会返回整个文件 + reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: r.ss.GetSize() - off}) if err != nil { return nil, err } - if c, ok := reader.(io.Closer); ok { - r.ss.Closers.Add(c) - } - rc := &readerCur{reader: reader, cur: off} + rc = &readerCur{reader: reader, cur: off} r.readers = append(r.readers, rc) return rc, nil } func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (int, error) { + if off == 0 && r.headCache != nil { + return r.headCache.read(p) + } rc, err := r.getReaderAtOffset(off) if err != nil { return 0, err } - num := 0 + n, num := 0, 0 for num < len(p) { - n, err := rc.reader.Read(p[num:]) + n, err = rc.reader.Read(p[num:]) rc.cur += int64(n) num += n - if err != nil { - return num, err + if err == nil { + continue } + if err == io.EOF { + // io.EOF是reader读取完了 + rc.cur = -1 + // yeka/zip包 没有处理EOF,我们要兼容 + // https://github.com/yeka/zip/blob/03d6312748a9d6e0bc0c9a7275385c09f06d9c14/reader.go#L433 + if num == len(p) { + err = nil + } + } + break } - return num, nil + return num, err } func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) { @@ -437,6 +519,9 @@ func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) { } func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) { + if r.masterOff == 0 && r.headCache != nil { + return r.headCache.read(p) + } rc, err := r.getReaderAtOffset(r.masterOff) if err != nil { return 0, err @@ -448,9 +533,16 @@ func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) { } func (r *RangeReadReadAtSeeker) Close() error { + if r.headCache != nil { + r.headCache.close() + } return r.ss.Close() } +type FileReadAtSeeker struct { + ss *SeekableStream +} + func (f *FileReadAtSeeker) GetRawStream() *SeekableStream 
{ return f.ss } diff --git a/pkg/utils/io.go b/pkg/utils/io.go index e06fb235..c314307d 100644 --- a/pkg/utils/io.go +++ b/pkg/utils/io.go @@ -233,3 +233,9 @@ func CopyWithBufferN(dst io.Writer, src io.Reader, n int64) (written int64, err } return } + +type NullWriter struct{} + +func (NullWriter) Write(p []byte) (n int, err error) { + return len(p), nil +} diff --git a/server/handles/archive.go b/server/handles/archive.go index bad99bac..6ff13641 100644 --- a/server/handles/archive.go +++ b/server/handles/archive.go @@ -2,6 +2,10 @@ package handles import ( "fmt" + "net/url" + stdpath "path" + "strings" + "github.com/alist-org/alist/v3/internal/archive/tool" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/errs" @@ -15,9 +19,6 @@ import ( "github.com/gin-gonic/gin" "github.com/pkg/errors" log "github.com/sirupsen/logrus" - "mime" - stdpath "path" - "strings" ) type ArchiveMetaReq struct { @@ -31,6 +32,7 @@ type ArchiveMetaResp struct { Comment string `json:"comment"` IsEncrypted bool `json:"encrypted"` Content []ArchiveContentResp `json:"content"` + Sort *model.Sort `json:"sort,omitempty"` RawURL string `json:"raw_url"` Sign string `json:"sign"` } @@ -128,6 +130,7 @@ func FsArchiveMeta(c *gin.Context) { Comment: ret.GetComment(), IsEncrypted: ret.IsEncrypted(), Content: toContentResp(ret.GetTree()), + Sort: ret.Sort, RawURL: fmt.Sprintf("%s%s%s", common.GetApiUrl(c.Request), api, utils.EncodePath(reqPath, true)), Sign: s, }) @@ -361,14 +364,11 @@ func ArchiveInternalExtract(c *gin.Context) { "Referrer-Policy": "no-referrer", "Cache-Control": "max-age=0, no-cache, no-store, must-revalidate", } - if c.Query("attachment") == "true" { - filename := stdpath.Base(innerPath) - headers["Content-Disposition"] = fmt.Sprintf("attachment; filename=\"%s\"", filename) - } + filename := stdpath.Base(innerPath) + headers["Content-Disposition"] = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, filename, url.PathEscape(filename)) 
contentType := c.Request.Header.Get("Content-Type") if contentType == "" { - fileExt := stdpath.Ext(innerPath) - contentType = mime.TypeByExtension(fileExt) + contentType = utils.GetMimeType(filename) } c.DataFromReader(200, size, contentType, rc, headers) } From 0d4c63e9ff6a4d542c5cee1d5ca56cf9f6102276 Mon Sep 17 00:00:00 2001 From: Jealous Date: Mon, 27 Jan 2025 20:09:17 +0800 Subject: [PATCH 091/187] feat(fs): display the existing filename in error message (#7877) --- server/handles/fsmanage.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/handles/fsmanage.go b/server/handles/fsmanage.go index 9349e7e2..c527464e 100644 --- a/server/handles/fsmanage.go +++ b/server/handles/fsmanage.go @@ -90,7 +90,7 @@ func FsMove(c *gin.Context) { if !req.Overwrite { for _, name := range req.Names { if res, _ := fs.Get(c, stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil { - common.ErrorStrResp(c, "file exists", 403) + common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403) return } } @@ -133,7 +133,7 @@ func FsCopy(c *gin.Context) { if !req.Overwrite { for _, name := range req.Names { if res, _ := fs.Get(c, stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil { - common.ErrorStrResp(c, "file exists", 403) + common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403) return } } @@ -180,7 +180,7 @@ func FsRename(c *gin.Context) { dstPath := stdpath.Join(stdpath.Dir(reqPath), req.Name) if dstPath != reqPath { if res, _ := fs.Get(c, dstPath, &fs.GetArgs{NoLog: true}); res != nil { - common.ErrorStrResp(c, "file exists", 403) + common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", req.Name), 403) return } } From cafdb4d407c9d23663c25c39e424090eeffa5fa9 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Mon, 27 Jan 2025 20:11:21 +0800 Subject: [PATCH 092/187] fix(139): correct path handling in groupGetFiles (#7850 closes #7848,#7603) * fix(139): correct path handling in groupGetFiles * perf(139): reduce 
the number of requests in groupGetFiles * refactor(139): check authorization expiration (#10) * refactor(139): check authorization expiration * fix bug * chore(139): update api version to 7.14.0 --------- Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> --- drivers/139/driver.go | 53 ++++++++++++++++++++------------------ drivers/139/util.go | 60 +++++++++++++++++++++++++++++-------------- 2 files changed, 69 insertions(+), 44 deletions(-) diff --git a/drivers/139/driver.go b/drivers/139/driver.go index ebb30e25..cf64a8fd 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -2,13 +2,11 @@ package _139 import ( "context" - "encoding/base64" "fmt" "io" "net/http" "path" "strconv" - "strings" "time" "github.com/alist-org/alist/v3/drivers/base" @@ -42,7 +40,11 @@ func (d *Yun139) Init(ctx context.Context) error { if d.Authorization == "" { return fmt.Errorf("authorization is empty") } - d.cron = cron.NewCron(time.Hour * 24 * 7) + err := d.refreshToken() + if err != nil { + return err + } + d.cron = cron.NewCron(time.Hour * 12) d.cron.Do(func() { err := d.refreshToken() if err != nil { @@ -67,28 +69,29 @@ func (d *Yun139) Init(ctx context.Context) error { default: return errs.NotImplement } - if d.ref != nil { - return nil - } - decode, err := base64.StdEncoding.DecodeString(d.Authorization) - if err != nil { - return err - } - decodeStr := string(decode) - splits := strings.Split(decodeStr, ":") - if len(splits) < 2 { - return fmt.Errorf("authorization is invalid, splits < 2") - } - d.Account = splits[1] - _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{ - "qryUserExternInfoReq": base.Json{ - "commonAccountInfo": base.Json{ - "account": d.getAccount(), - "accountType": 1, - }, - }, - }, nil) - return err + // if d.ref != nil { + // return nil + // } + // decode, err := base64.StdEncoding.DecodeString(d.Authorization) + // if err != nil { + // return err + // } + // decodeStr := string(decode) + 
// splits := strings.Split(decodeStr, ":") + // if len(splits) < 2 { + // return fmt.Errorf("authorization is invalid, splits < 2") + // } + // d.Account = splits[1] + // _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{ + // "qryUserExternInfoReq": base.Json{ + // "commonAccountInfo": base.Json{ + // "account": d.getAccount(), + // "accountType": 1, + // }, + // }, + // }, nil) + // return err + return nil } func (d *Yun139) InitReference(storage driver.Driver) error { diff --git a/drivers/139/util.go b/drivers/139/util.go index 2dade250..3e1a61ed 100644 --- a/drivers/139/util.go +++ b/drivers/139/util.go @@ -6,6 +6,7 @@ import ( "fmt" "net/http" "net/url" + "path" "sort" "strconv" "strings" @@ -54,17 +55,37 @@ func getTime(t string) time.Time { } func (d *Yun139) refreshToken() error { - if d.ref == nil { + if d.ref != nil { return d.ref.refreshToken() } - url := "https://aas.caiyun.feixin.10086.cn:443/tellin/authTokenRefresh.do" - var resp RefreshTokenResp decode, err := base64.StdEncoding.DecodeString(d.Authorization) if err != nil { - return err + return fmt.Errorf("authorization decode failed: %s", err) } decodeStr := string(decode) splits := strings.Split(decodeStr, ":") + if len(splits) < 3 { + return fmt.Errorf("authorization is invalid, splits < 3") + } + strs := strings.Split(splits[2], "|") + if len(strs) < 4 { + return fmt.Errorf("authorization is invalid, strs < 4") + } + expiration, err := strconv.ParseInt(strs[3], 10, 64) + if err != nil { + return fmt.Errorf("authorization is invalid") + } + expiration -= time.Now().UnixMilli() + if expiration > 1000*60*60*24*15 { + // Authorization有效期大于15天无需刷新 + return nil + } + if expiration < 0 { + return fmt.Errorf("authorization has expired") + } + + url := "https://aas.caiyun.feixin.10086.cn:443/tellin/authTokenRefresh.do" + var resp RefreshTokenResp reqBody := "" + splits[2] + "" + splits[1] + "656" _, err = base.RestyClient.R(). ForceContentType("application/xml"). 
@@ -108,15 +129,16 @@ func (d *Yun139) request(pathname string, method string, callback base.ReqCallba //"mcloud-route": "001", "mcloud-sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign), //"mcloud-skey":"", - "mcloud-version": "6.6.0", - "Origin": "https://yun.139.com", - "Referer": "https://yun.139.com/w/", - "x-DeviceInfo": "||9|6.6.0|chrome|95.0.4638.69|uwIy75obnsRPIwlJSd7D9GhUvFwG96ce||macos 10.15.2||zh-CN|||", - "x-huawei-channelSrc": "10000034", - "x-inner-ntwk": "2", - "x-m4c-caller": "PC", - "x-m4c-src": "10002", - "x-SvcType": svcType, + "mcloud-version": "7.14.0", + "Origin": "https://yun.139.com", + "Referer": "https://yun.139.com/w/", + "x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||", + "x-huawei-channelSrc": "10000034", + "x-inner-ntwk": "2", + "x-m4c-caller": "PC", + "x-m4c-src": "10002", + "x-SvcType": svcType, + "Inner-Hcy-Router-Https": "1", }) var e BaseResp @@ -269,12 +291,12 @@ func (d *Yun139) groupGetFiles(catalogID string) ([]model.Obj, error) { for { data := d.newJson(base.Json{ "groupID": d.CloudID, - "catalogID": catalogID, + "catalogID": path.Base(catalogID), "contentSortType": 0, "sortDirection": 1, "startNumber": pageNum, "endNumber": pageNum + 99, - "path": catalogID, + "path": path.Join(d.RootFolderID, catalogID), }) var resp QueryGroupContentListResp @@ -310,7 +332,7 @@ func (d *Yun139) groupGetFiles(catalogID string) ([]model.Obj, error) { } files = append(files, &f) } - if pageNum > resp.Data.GetGroupContentResult.NodeCount { + if (pageNum + 99) > resp.Data.GetGroupContentResult.NodeCount { break } pageNum = pageNum + 100 @@ -393,10 +415,10 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R "Mcloud-Client": "10701", "Mcloud-Route": "001", "Mcloud-Sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign), - "Mcloud-Version": "7.13.0", + "Mcloud-Version": "7.14.0", "Origin": "https://yun.139.com", "Referer": "https://yun.139.com/w/", - "x-DeviceInfo": 
"||9|7.13.0|chrome|120.0.0.0|||windows 10||zh-CN|||", + "x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||", "x-huawei-channelSrc": "10000034", "x-inner-ntwk": "2", "x-m4c-caller": "PC", @@ -405,7 +427,7 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R "X-Yun-Api-Version": "v1", "X-Yun-App-Channel": "10000034", "X-Yun-Channel-Source": "10000034", - "X-Yun-Client-Info": "||9|7.13.0|chrome|120.0.0.0|||windows 10||zh-CN|||dW5kZWZpbmVk||", + "X-Yun-Client-Info": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||dW5kZWZpbmVk||", "X-Yun-Module-Type": "100", "X-Yun-Svc-Type": "1", }) From 23f3178f39981a6bdfcf90871cb8bfc3aed05117 Mon Sep 17 00:00:00 2001 From: LaoShui <79132480+laoshuikaixue@users.noreply.github.com> Date: Mon, 27 Jan 2025 20:13:35 +0800 Subject: [PATCH 093/187] chore(README): formatting spacing in README links (#7879) [skip ci] --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8140f325..d1189188 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ --- -English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md) +English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md) ## Features From d5ec998699dd592e3ee7f54cf5bcce7dc697c173 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Mon, 27 Jan 2025 20:18:10 +0800 Subject: [PATCH 094/187] feat(task): allow retry canceled (#7852) --- internal/conf/config.go | 14 ++++++++------ internal/fs/archive.go | 6 +++++- internal/fs/copy.go | 1 + internal/offline_download/tool/download.go | 1 + internal/offline_download/tool/transfer.go | 1 + internal/task/base.go | 15 +++++++++++++++ 6 files changed, 31 insertions(+), 7 deletions(-) diff --git a/internal/conf/config.go b/internal/conf/config.go index 39b23227..1766ae84 100644 --- 
a/internal/conf/config.go +++ b/internal/conf/config.go @@ -53,12 +53,13 @@ type TaskConfig struct { } type TasksConfig struct { - Download TaskConfig `json:"download" envPrefix:"DOWNLOAD_"` - Transfer TaskConfig `json:"transfer" envPrefix:"TRANSFER_"` - Upload TaskConfig `json:"upload" envPrefix:"UPLOAD_"` - Copy TaskConfig `json:"copy" envPrefix:"COPY_"` - Decompress TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"` - DecompressUpload TaskConfig `json:"decompress_upload" envPrefix:"DECOMPRESS_UPLOAD_"` + Download TaskConfig `json:"download" envPrefix:"DOWNLOAD_"` + Transfer TaskConfig `json:"transfer" envPrefix:"TRANSFER_"` + Upload TaskConfig `json:"upload" envPrefix:"UPLOAD_"` + Copy TaskConfig `json:"copy" envPrefix:"COPY_"` + Decompress TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"` + DecompressUpload TaskConfig `json:"decompress_upload" envPrefix:"DECOMPRESS_UPLOAD_"` + AllowRetryCanceled bool `json:"allow_retry_canceled" env:"ALLOW_RETRY_CANCELED"` } type Cors struct { @@ -182,6 +183,7 @@ func DefaultConfig() *Config { Workers: 5, MaxRetry: 2, }, + AllowRetryCanceled: false, }, Cors: Cors{ AllowOrigins: []string{"*"}, diff --git a/internal/fs/archive.go b/internal/fs/archive.go index f3e05926..39131827 100644 --- a/internal/fs/archive.go +++ b/internal/fs/archive.go @@ -50,6 +50,7 @@ func (t *ArchiveDownloadTask) GetStatus() string { } func (t *ArchiveDownloadTask) Run() error { + t.ReinitCtx() t.ClearEndTime() t.SetStartTime(time.Now()) defer func() { t.SetEndTime(time.Now()) }() @@ -144,6 +145,7 @@ func (t *ArchiveContentUploadTask) GetStatus() string { } func (t *ArchiveContentUploadTask) Run() error { + t.ReinitCtx() t.ClearEndTime() t.SetStartTime(time.Now()) defer func() { t.SetEndTime(time.Now()) }() @@ -235,7 +237,9 @@ func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTsk *Archi func (t *ArchiveContentUploadTask) Cancel() { t.TaskExtension.Cancel() - t.deleteSrcFile() + if !conf.Conf.Tasks.AllowRetryCanceled { + 
t.deleteSrcFile() + } } func (t *ArchiveContentUploadTask) deleteSrcFile() { diff --git a/internal/fs/copy.go b/internal/fs/copy.go index 977f7280..155e3cf7 100644 --- a/internal/fs/copy.go +++ b/internal/fs/copy.go @@ -39,6 +39,7 @@ func (t *CopyTask) GetStatus() string { } func (t *CopyTask) Run() error { + t.ReinitCtx() t.ClearEndTime() t.SetStartTime(time.Now()) defer func() { t.SetEndTime(time.Now()) }() diff --git a/internal/offline_download/tool/download.go b/internal/offline_download/tool/download.go index c3b30f1b..42b2dbfb 100644 --- a/internal/offline_download/tool/download.go +++ b/internal/offline_download/tool/download.go @@ -28,6 +28,7 @@ type DownloadTask struct { } func (t *DownloadTask) Run() error { + t.ReinitCtx() t.ClearEndTime() t.SetStartTime(time.Now()) defer func() { t.SetEndTime(time.Now()) }() diff --git a/internal/offline_download/tool/transfer.go b/internal/offline_download/tool/transfer.go index 8c7ab244..1d5ece61 100644 --- a/internal/offline_download/tool/transfer.go +++ b/internal/offline_download/tool/transfer.go @@ -32,6 +32,7 @@ type TransferTask struct { } func (t *TransferTask) Run() error { + t.ReinitCtx() t.ClearEndTime() t.SetStartTime(time.Now()) defer func() { t.SetEndTime(time.Now()) }() diff --git a/internal/task/base.go b/internal/task/base.go index 22b16741..c3703bd1 100644 --- a/internal/task/base.go +++ b/internal/task/base.go @@ -2,6 +2,7 @@ package task import ( "context" + "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/model" "github.com/xhofe/tache" "sync" @@ -66,6 +67,20 @@ func (t *TaskExtension) Ctx() context.Context { return t.ctx } +func (t *TaskExtension) ReinitCtx() { + if !conf.Conf.Tasks.AllowRetryCanceled { + return + } + select { + case <-t.Base.Ctx().Done(): + ctx, cancel := context.WithCancel(context.Background()) + t.SetCtx(ctx) + t.SetCancelFunc(cancel) + t.ctx = nil + default: + } +} + type TaskExtensionInfo interface { tache.TaskWithInfo GetCreator() 
*model.User From 5eff8cc7bffdbe5a20a37d1a9964bb391d42baaa Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Mon, 27 Jan 2025 20:20:09 +0800 Subject: [PATCH 095/187] feat(upload): support rapid upload on web (#7851) --- drivers/alist_v3/driver.go | 9 +++++++++ server/handles/fsup.go | 23 +++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/drivers/alist_v3/driver.go b/drivers/alist_v3/driver.go index d078c5fb..894bac64 100644 --- a/drivers/alist_v3/driver.go +++ b/drivers/alist_v3/driver.go @@ -189,6 +189,15 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt req.Header.Set("Authorization", d.Token) req.Header.Set("File-Path", path.Join(dstDir.GetPath(), stream.GetName())) req.Header.Set("Password", d.MetaPassword) + if md5 := stream.GetHash().GetHash(utils.MD5); len(md5) > 0 { + req.Header.Set("X-File-Md5", md5) + } + if sha1 := stream.GetHash().GetHash(utils.SHA1); len(sha1) > 0 { + req.Header.Set("X-File-Sha1", sha1) + } + if sha256 := stream.GetHash().GetHash(utils.SHA256); len(sha256) > 0 { + req.Header.Set("X-File-Sha256", sha256) + } req.ContentLength = stream.GetSize() // client := base.NewHttpClient() diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 563afbcd..15a6328b 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -2,6 +2,7 @@ package handles import ( "github.com/alist-org/alist/v3/internal/task" + "github.com/alist-org/alist/v3/pkg/utils" "io" "net/url" stdpath "path" @@ -55,11 +56,22 @@ func FsStream(c *gin.Context) { common.ErrorResp(c, err, 400) return } + h := make(map[*utils.HashType]string) + if md5 := c.GetHeader("X-File-Md5"); md5 != "" { + h[utils.MD5] = md5 + } + if sha1 := c.GetHeader("X-File-Sha1"); sha1 != "" { + h[utils.SHA1] = sha1 + } + if sha256 := c.GetHeader("X-File-Sha256"); sha256 != "" { + h[utils.SHA256] = sha256 + } s := &stream.FileStream{ Obj: &model.Object{ Name: name, Size: size, Modified: getLastModified(c), + HashInfo: 
utils.NewHashInfoByMap(h), }, Reader: c.Request.Body, Mimetype: c.GetHeader("Content-Type"), @@ -128,11 +140,22 @@ func FsForm(c *gin.Context) { } defer f.Close() dir, name := stdpath.Split(path) + h := make(map[*utils.HashType]string) + if md5 := c.GetHeader("X-File-Md5"); md5 != "" { + h[utils.MD5] = md5 + } + if sha1 := c.GetHeader("X-File-Sha1"); sha1 != "" { + h[utils.SHA1] = sha1 + } + if sha256 := c.GetHeader("X-File-Sha256"); sha256 != "" { + h[utils.SHA256] = sha256 + } s := stream.FileStream{ Obj: &model.Object{ Name: name, Size: file.Size, Modified: getLastModified(c), + HashInfo: utils.NewHashInfoByMap(h), }, Reader: f, Mimetype: file.Header.Get("Content-Type"), From 267120a8c8bdde8793ccc212fa311418c9520823 Mon Sep 17 00:00:00 2001 From: Shelton Zhu <498220739@qq.com> Date: Mon, 27 Jan 2025 20:20:55 +0800 Subject: [PATCH 096/187] fix(115): fix offline download (#7845 close #7794) * feat(115): use multi url for list files & change download url api * fix(115): fix offline download. 
(close #7794) --- drivers/115/driver.go | 2 +- drivers/115/util.go | 27 ++++++++++++------------ drivers/115_share/meta.go | 2 +- go.mod | 6 ++---- go.sum | 44 ++------------------------------------- 5 files changed, 19 insertions(+), 62 deletions(-) diff --git a/drivers/115/driver.go b/drivers/115/driver.go index 4f584cd7..0bf8a927 100644 --- a/drivers/115/driver.go +++ b/drivers/115/driver.go @@ -241,7 +241,7 @@ func (d *Pan115) OfflineList(ctx context.Context) ([]*driver115.OfflineTask, err } func (d *Pan115) OfflineDownload(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) { - return d.client.AddOfflineTaskURIs(uris, dstDir.GetID()) + return d.client.AddOfflineTaskURIs(uris, dstDir.GetID(), driver115.WithAppVer(appVer)) } func (d *Pan115) DeleteOfflineTasks(ctx context.Context, hashes []string, deleteFiles bool) error { diff --git a/drivers/115/util.go b/drivers/115/util.go index d7a1adff..84cbd88f 100644 --- a/drivers/115/util.go +++ b/drivers/115/util.go @@ -21,9 +21,9 @@ import ( "github.com/alist-org/alist/v3/pkg/utils" "github.com/aliyun/aliyun-oss-go-sdk/oss" + cipher "github.com/SheltonZhu/115driver/pkg/crypto/ec115" + crypto "github.com/SheltonZhu/115driver/pkg/crypto/m115" driver115 "github.com/SheltonZhu/115driver/pkg/driver" - crypto "github.com/gaoyb7/115drive-webdav/115" - "github.com/orzogc/fake115uploader/cipher" "github.com/pkg/errors" ) @@ -63,7 +63,7 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) { if d.PageSize <= 0 { d.PageSize = driver115.FileListLimit } - files, err := d.client.ListWithLimit(fileId, d.PageSize) + files, err := d.client.ListWithLimit(fileId, d.PageSize, driver115.WithMultiUrls()) if err != nil { return nil, err } @@ -108,7 +108,7 @@ func (d *Pan115) getUA() string { func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) { key := crypto.GenerateKey() result := driver115.DownloadResp{} - params, err := utils.Json.Marshal(map[string]string{"pickcode": 
pickCode}) + params, err := utils.Json.Marshal(map[string]string{"pick_code": pickCode}) if err != nil { return nil, err } @@ -116,7 +116,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e data := crypto.Encode(params, key) bodyReader := strings.NewReader(url.Values{"data": []string{data}}.Encode()) - reqUrl := fmt.Sprintf("%s?t=%s", driver115.ApiDownloadGetUrl, driver115.Now().String()) + reqUrl := fmt.Sprintf("%s?t=%s", driver115.AndroidApiDownloadGetUrl, driver115.Now().String()) req, _ := http.NewRequest(http.MethodPost, reqUrl, bodyReader) req.Header.Set("Content-Type", "application/x-www-form-urlencoded") req.Header.Set("Cookie", d.Cookie) @@ -145,19 +145,18 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e return nil, err } - downloadInfo := driver115.DownloadData{} + downloadInfo := struct { + Url string `json:"url"` + }{} if err := utils.Json.Unmarshal(bytes, &downloadInfo); err != nil { return nil, err } - for _, info := range downloadInfo { - if info.FileSize < 0 { - return nil, driver115.ErrDownloadEmpty - } - info.Header = resp.Request.Header - return info, nil - } - return nil, driver115.ErrUnexpected + info := &driver115.DownloadInfo{} + info.PickCode = pickCode + info.Header = resp.Request.Header + info.Url.Url = downloadInfo.Url + return info, nil } func (c *Pan115) GenerateToken(fileID, preID, timeStamp, fileSize, signKey, signVal string) string { diff --git a/drivers/115_share/meta.go b/drivers/115_share/meta.go index b3d2cc1f..92f8bf0f 100644 --- a/drivers/115_share/meta.go +++ b/drivers/115_share/meta.go @@ -18,7 +18,7 @@ type Addition struct { var config = driver.Config{ Name: "115 Share", - DefaultRoot: "", + DefaultRoot: "0", // OnlyProxy: true, // OnlyLocal: true, CheckStatus: false, diff --git a/go.mod b/go.mod index 0693dcd3..2bf4ba3e 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.23.1 require ( github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 
github.com/KirCute/sftpd-alist v0.0.12 - github.com/SheltonZhu/115driver v1.0.32 + github.com/SheltonZhu/115driver v1.0.34 github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4 github.com/alist-org/gofakes3 v0.0.7 @@ -29,7 +29,6 @@ require ( github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 github.com/foxxorcat/mopan-sdk-go v0.1.6 github.com/foxxorcat/weiyun-sdk-go v0.1.3 - github.com/gaoyb7/115drive-webdav v0.1.8 github.com/gin-contrib/cors v1.7.2 github.com/gin-gonic/gin v1.10.0 github.com/go-resty/resty/v2 v2.14.0 @@ -50,7 +49,6 @@ require ( github.com/minio/sio v0.4.0 github.com/natefinch/lumberjack v2.0.0+incompatible github.com/ncw/swift/v2 v2.0.3 - github.com/orzogc/fake115uploader v0.6.2 github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/pquerna/otp v1.4.0 @@ -103,6 +101,7 @@ require ( github.com/ipfs/boxo v0.12.0 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/klauspost/pgzip v1.2.6 // indirect + github.com/kr/text v0.2.0 // indirect github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect github.com/sorairolake/lzip-go v0.3.5 // indirect github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect @@ -139,7 +138,6 @@ require ( github.com/blevesearch/zapx/v13 v13.3.10 // indirect github.com/blevesearch/zapx/v14 v14.3.10 // indirect github.com/blevesearch/zapx/v15 v15.3.13 // indirect - github.com/bluele/gcache v0.0.2 // indirect github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/bytedance/sonic v1.11.6 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect diff --git a/go.sum b/go.sum index 9d92a935..db58dea2 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4 github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= github.com/STARRY-S/zip 
v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg= github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4= -github.com/SheltonZhu/115driver v1.0.32 h1:Taw1bnfcPJZW0xTdhDvEbBS1tccif7J7DslRp2NkDyQ= -github.com/SheltonZhu/115driver v1.0.32/go.mod h1:XXFi23pyhAgzUE8dUEKdGvIdUQKi3wv6zR7C1Do40D8= +github.com/SheltonZhu/115driver v1.0.34 h1:zhMLp4vgq7GksqvSxQQDOVfK6EOHldQl4b2n8tnZ+EE= +github.com/SheltonZhu/115driver v1.0.34/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E= github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A= github.com/Unknwon/goconfig v1.0.0/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw= github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 h1:h6q5E9aMBhhdqouW81LozVPI1I+Pu6IxL2EKpfm5OjY= @@ -110,8 +110,6 @@ github.com/blevesearch/zapx/v15 v15.3.13 h1:6EkfaZiPlAxqXz0neniq35my6S48QI94W/wy github.com/blevesearch/zapx/v15 v15.3.13/go.mod h1:Turk/TNRKj9es7ZpKK95PS7f6D44Y7fAFy8F4LXQtGg= github.com/blevesearch/zapx/v16 v16.1.5 h1:b0sMcarqNFxuXvjoXsF8WtwVahnxyhEvBSRJi/AUHjU= github.com/blevesearch/zapx/v16 v16.1.5/go.mod h1:J4mSF39w1QELc11EWRSBFkPeZuO7r/NPKkHzDCoiaI8= -github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw= -github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0= github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU= github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs= github.com/bodgit/sevenzip v1.6.0 h1:a4R0Wu6/P1o1pP/3VV++aEOcyeBxeO/xE2Y9NSTrr6A= @@ -199,15 +197,12 @@ github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= 
-github.com/gaoyb7/115drive-webdav v0.1.8 h1:EJt4PSmcbvBY4KUh2zSo5p6fN9LZFNkIzuKejipubVw= -github.com/gaoyb7/115drive-webdav v0.1.8/go.mod h1:BKbeY6j8SKs3+rzBFFALznGxbPmefEm3vA+dGhqgOGU= github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w= github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc= github.com/gin-contrib/cors v1.7.2 h1:oLDHxdg8W/XDoN/8zamqk/Drgt4oVZDvaV0YmvVICQw= github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= @@ -226,20 +221,14 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= 
-github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= -github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= -github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= github.com/go-resty/resty/v2 v2.14.0 h1:/rhkzsAqGQkozwfKS5aFAbb6TyKd3zyFRWcdRXLPCAU= github.com/go-resty/resty/v2 v2.14.0/go.mod h1:IW6mekUOsElt9C7oWr0XRt9BNSD6D5rr9mhk6NjmNHg= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= @@ -248,7 +237,6 @@ github.com/go-webauthn/webauthn v0.11.1 h1:5G/+dg91/VcaJHTtJUfwIlNJkLwbJCcnUc4W8 github.com/go-webauthn/webauthn v0.11.1/go.mod h1:YXRm1WG0OtUyDFaVAgB5KG7kVqW+6dYCJ7FTQH4SxEE= github.com/go-webauthn/x v0.1.12 h1:RjQ5cvApzyU/xLCiP+rub0PE4HBZsLggbxGR5ZpUf/A= github.com/go-webauthn/x v0.1.12/go.mod h1:XlRcGkNH8PT45TfeJYc6gqpOtiOendHhVmnOxh+5yHs= -github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -390,8 +378,6 @@ github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgSh github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -400,7 +386,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/larksuite/oapi-sdk-go/v3 v3.3.1 h1:DLQQEgHUAGZB6RVlceB1f6A94O206exxW2RIMH+gMUc= github.com/larksuite/oapi-sdk-go/v3 v3.3.1/go.mod h1:ZEplY+kwuIrj/nqw5uSCINNATcH3KdxSN7y+UxYY5fI= -github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ -420,7 +405,6 @@ github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= @@ -481,19 +465,15 @@ 
github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg= github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY= github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= -github.com/orzogc/fake115uploader v0.6.2 h1:f4LzqeeXpmY7DjOMnzmAnnPTPMA/f/BUclq4ecffTvU= -github.com/orzogc/fake115uploader v0.6.2/go.mod h1:Mqqwv1+gUEjJhUfIQanco3DCTKp+7lSx8DJ3AoRwMoE= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A= -github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -528,8 +508,6 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.4.7 
h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -548,7 +526,6 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= @@ -570,14 +547,12 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify 
v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -603,8 +578,6 @@ github.com/u2takey/ffmpeg-go v0.5.0 h1:r7d86XuL7uLWJ5mzSeQ03uvjfIhiJYvsRAJFCW4uk github.com/u2takey/ffmpeg-go v0.5.0/go.mod h1:ruZWkvC1FEiUNjmROowOAps3ZcWxEiOpFoHCvk97kGc= github.com/u2takey/go-utils v0.3.1 h1:TaQTgmEZZeDHQFYfd+AdUT1cT4QJgJn/XVPELhHw4ys= github.com/u2takey/go-utils v0.3.1/go.mod h1:6e+v5vEZ/6gu12w/DC2ixZdZtCrNokVxD0JUklcqdCs= -github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= -github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -616,8 +589,6 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.37.1-0.20220607072126-8a320890c08d h1:xS9QTPgKl9ewGsAOPc+xW7DeStJDqYPfisDmeSCcbco= github.com/valyala/fasthttp v1.37.1-0.20220607072126-8a320890c08d/go.mod 
h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I= -github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= -github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXorZG0KzTxbp0Cr1n3FEegfmyd9br1k= github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I= @@ -669,11 +640,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= @@ -734,7 +702,6 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -779,7 +746,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -789,14 +755,11 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -843,7 +806,6 @@ golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= @@ -928,7 +890,6 @@ google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= google.golang.org/grpc v1.66.0/go.mod 
h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= @@ -950,7 +911,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo= From 99f39410f2aa34793f047fbe781bc03f726b4a35 Mon Sep 17 00:00:00 2001 From: Jiang Xiang <869914918@qq.com> Date: Mon, 27 Jan 2025 20:23:13 +0800 Subject: [PATCH 097/187] fix(s3): escape CopySource request header when copying files (#7860 close #7858) --- drivers/s3/util.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/s3/util.go b/drivers/s3/util.go index 31e658bd..99f271aa 100644 --- a/drivers/s3/util.go +++ b/drivers/s3/util.go @@ -4,6 +4,7 @@ import ( "context" "errors" "net/http" + "net/url" "path" "strings" @@ -198,7 +199,7 @@ func (d *S3) copyFile(ctx context.Context, src string, dst string) error { dstKey := getKey(dst, false) input := &s3.CopyObjectInput{ 
Bucket: &d.Bucket, - CopySource: aws.String("/" + d.Bucket + "/" + srcKey), + CopySource: aws.String(url.PathEscape("/" + d.Bucket + "/" + srcKey)), Key: &dstKey, } _, err := d.client.CopyObject(input) From 258b8f520f467b7f7be7cc18d70f1e86de95f182 Mon Sep 17 00:00:00 2001 From: Jealous Date: Mon, 27 Jan 2025 20:25:39 +0800 Subject: [PATCH 098/187] feat(recursive-move): add `overwrite` option to preventing unintentional overwriting (#7868 closes #7382,#7719) * feat(recursive-move): add `overwrite` option to preventing unintentional overwriting * chore: rearrange code order --- server/handles/fsbatch.go | 204 ++++++++++++++++++++++---------------- 1 file changed, 116 insertions(+), 88 deletions(-) diff --git a/server/handles/fsbatch.go b/server/handles/fsbatch.go index fa7971df..dd7b7e47 100644 --- a/server/handles/fsbatch.go +++ b/server/handles/fsbatch.go @@ -3,6 +3,7 @@ package handles import ( "fmt" "regexp" + "slices" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/fs" @@ -14,6 +15,121 @@ import ( "github.com/pkg/errors" ) +type RecursiveMoveReq struct { + SrcDir string `json:"src_dir"` + DstDir string `json:"dst_dir"` + Overwrite bool `json:"overwrite"` +} + +func FsRecursiveMove(c *gin.Context) { + var req RecursiveMoveReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + + user := c.MustGet("user").(*model.User) + if !user.CanMove() { + common.ErrorResp(c, errs.PermissionDenied, 403) + return + } + srcDir, err := user.JoinPath(req.SrcDir) + if err != nil { + common.ErrorResp(c, err, 403) + return + } + dstDir, err := user.JoinPath(req.DstDir) + if err != nil { + common.ErrorResp(c, err, 403) + return + } + + meta, err := op.GetNearestMeta(srcDir) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + common.ErrorResp(c, err, 500, true) + return + } + } + c.Set("meta", meta) + + rootFiles, err := fs.List(c, srcDir, &fs.ListArgs{}) + if err != nil { + 
common.ErrorResp(c, err, 500) + return + } + + var existingFileNames []string + if !req.Overwrite { + dstFiles, err := fs.List(c, dstDir, &fs.ListArgs{}) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + existingFileNames = make([]string, 0, len(dstFiles)) + for _, dstFile := range dstFiles { + existingFileNames = append(existingFileNames, dstFile.GetName()) + } + } + + // record the file path + filePathMap := make(map[model.Obj]string) + movingFiles := generic.NewQueue[model.Obj]() + movingFileNames := make([]string, 0, len(rootFiles)) + for _, file := range rootFiles { + movingFiles.Push(file) + filePathMap[file] = srcDir + } + + for !movingFiles.IsEmpty() { + + movingFile := movingFiles.Pop() + movingFilePath := filePathMap[movingFile] + movingFileName := fmt.Sprintf("%s/%s", movingFilePath, movingFile.GetName()) + if movingFile.IsDir() { + // directory, recursive move + subFilePath := movingFileName + subFiles, err := fs.List(c, movingFileName, &fs.ListArgs{Refresh: true}) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + for _, subFile := range subFiles { + movingFiles.Push(subFile) + filePathMap[subFile] = subFilePath + } + } else { + + if movingFilePath == dstDir { + // same directory, don't move + continue + } + + if !req.Overwrite { + if slices.Contains(existingFileNames, movingFile.GetName()) { + common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", movingFile.GetName()), 403) + return + } + existingFileNames = append(existingFileNames, movingFile.GetName()) + } + + movingFileNames = append(movingFileNames, movingFileName) + } + + } + + for i, fileName := range movingFileNames { + // move + err := fs.Move(c, fileName, dstDir, len(movingFileNames) > i+1) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + } + + common.SuccessResp(c) +} + type BatchRenameReq struct { SrcDir string `json:"src_dir"` RenameObjects []struct { @@ -61,94 +177,6 @@ func FsBatchRename(c *gin.Context) { common.SuccessResp(c) } -type 
RecursiveMoveReq struct { - SrcDir string `json:"src_dir"` - DstDir string `json:"dst_dir"` -} - -func FsRecursiveMove(c *gin.Context) { - var req RecursiveMoveReq - if err := c.ShouldBind(&req); err != nil { - common.ErrorResp(c, err, 400) - return - } - - user := c.MustGet("user").(*model.User) - if !user.CanMove() { - common.ErrorResp(c, errs.PermissionDenied, 403) - return - } - srcDir, err := user.JoinPath(req.SrcDir) - if err != nil { - common.ErrorResp(c, err, 403) - return - } - dstDir, err := user.JoinPath(req.DstDir) - if err != nil { - common.ErrorResp(c, err, 403) - return - } - - meta, err := op.GetNearestMeta(srcDir) - if err != nil { - if !errors.Is(errors.Cause(err), errs.MetaNotFound) { - common.ErrorResp(c, err, 500, true) - return - } - } - c.Set("meta", meta) - - rootFiles, err := fs.List(c, srcDir, &fs.ListArgs{}) - if err != nil { - common.ErrorResp(c, err, 500) - return - } - - // record the file path - filePathMap := make(map[model.Obj]string) - movingFiles := generic.NewQueue[model.Obj]() - for _, file := range rootFiles { - movingFiles.Push(file) - filePathMap[file] = srcDir - } - - for !movingFiles.IsEmpty() { - - movingFile := movingFiles.Pop() - movingFilePath := filePathMap[movingFile] - movingFileName := fmt.Sprintf("%s/%s", movingFilePath, movingFile.GetName()) - if movingFile.IsDir() { - // directory, recursive move - subFilePath := movingFileName - subFiles, err := fs.List(c, movingFileName, &fs.ListArgs{Refresh: true}) - if err != nil { - common.ErrorResp(c, err, 500) - return - } - for _, subFile := range subFiles { - movingFiles.Push(subFile) - filePathMap[subFile] = subFilePath - } - } else { - - if movingFilePath == dstDir { - // same directory, don't move - continue - } - - // move - err := fs.Move(c, movingFileName, dstDir, movingFiles.IsEmpty()) - if err != nil { - common.ErrorResp(c, err, 500) - return - } - } - - } - - common.SuccessResp(c) -} - type RegexRenameReq struct { SrcDir string `json:"src_dir"` SrcNameRegex 
string `json:"src_name_regex"` From bdd9774aa7684f7eb66f6758d537ee161ee14078 Mon Sep 17 00:00:00 2001 From: Sakana Date: Mon, 27 Jan 2025 20:28:44 +0800 Subject: [PATCH 099/187] feat(github_releases): add support for github_releases driver (#7844 close #7842) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(github_releases): 添加对 GitHub Releases 的支持 * feat(github_releases): 增加目录大小和更新时间,增加请求缓存 * Feat(github_releases): 可选填入 GitHub token 来提高速率限制或访问私有仓库 * Fix(github_releases): 修复仓库无权限或不存在时的异常 * feat(github_releases): 支持显示所有版本,开启后不显示文件夹大小 * feat(github_releases): 兼容无子目录 --- drivers/all.go | 1 + drivers/github_releases/driver.go | 153 +++++++++++++++++++++ drivers/github_releases/meta.go | 34 +++++ drivers/github_releases/types.go | 68 ++++++++++ drivers/github_releases/util.go | 217 ++++++++++++++++++++++++++++++ 5 files changed, 473 insertions(+) create mode 100644 drivers/github_releases/driver.go create mode 100644 drivers/github_releases/meta.go create mode 100644 drivers/github_releases/types.go create mode 100644 drivers/github_releases/util.go diff --git a/drivers/all.go b/drivers/all.go index 8b253a08..bd051168 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -25,6 +25,7 @@ import ( _ "github.com/alist-org/alist/v3/drivers/febbox" _ "github.com/alist-org/alist/v3/drivers/ftp" _ "github.com/alist-org/alist/v3/drivers/github" + _ "github.com/alist-org/alist/v3/drivers/github_releases" _ "github.com/alist-org/alist/v3/drivers/google_drive" _ "github.com/alist-org/alist/v3/drivers/google_photo" _ "github.com/alist-org/alist/v3/drivers/halalcloud" diff --git a/drivers/github_releases/driver.go b/drivers/github_releases/driver.go new file mode 100644 index 00000000..79f2b582 --- /dev/null +++ b/drivers/github_releases/driver.go @@ -0,0 +1,153 @@ +package github_releases + +import ( + "context" + "fmt" + "net/http" + "time" + + "strings" + + "github.com/alist-org/alist/v3/internal/driver" + 
"github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" +) + +type GithubReleases struct { + model.Storage + Addition + + releases []Release +} + +func (d *GithubReleases) Config() driver.Config { + return config +} + +func (d *GithubReleases) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *GithubReleases) Init(ctx context.Context) error { + SetHeader(d.Addition.Token) + repos, err := ParseRepos(d.Addition.RepoStructure, d.Addition.ShowAllVersion) + if err != nil { + return err + } + d.releases = repos + return nil +} + +func (d *GithubReleases) Drop(ctx context.Context) error { + ClearCache() + return nil +} + +func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + files := make([]File, 0) + path := fmt.Sprintf("/%s", strings.Trim(dir.GetPath(), "/")) + + for _, repo := range d.releases { + if repo.Path == path { // 与仓库路径相同 + resp, err := GetRepoReleaseInfo(repo.RepoName, repo.ID, path, d.Storage.CacheExpiration) + if err != nil { + return nil, err + } + files = append(files, resp.Files...) + + if d.Addition.ShowReadme { + resp, err := GetGithubOtherFile(repo.RepoName, path, d.Storage.CacheExpiration) + if err != nil { + return nil, err + } + files = append(files, *resp...) 
+ } + + } else if strings.HasPrefix(repo.Path, path) { // 仓库路径是目录的子目录 + nextDir := GetNextDir(repo.Path, path) + if nextDir == "" { + continue + } + if d.Addition.ShowAllVersion { + files = append(files, File{ + FileName: nextDir, + Size: 0, + CreateAt: time.Time{}, + UpdateAt: time.Time{}, + Url: "", + Type: "dir", + Path: fmt.Sprintf("%s/%s", path, nextDir), + }) + continue + } + + repo, _ := GetRepoReleaseInfo(repo.RepoName, repo.Version, path, d.Storage.CacheExpiration) + + hasSameDir := false + for index, file := range files { + if file.FileName == nextDir { + hasSameDir = true + files[index].Size += repo.Size + files[index].UpdateAt = func(a time.Time, b time.Time) time.Time { + if a.After(b) { + return a + } + return b + }(files[index].UpdateAt, repo.UpdateAt) + break + } + } + + if !hasSameDir { + files = append(files, File{ + FileName: nextDir, + Size: repo.Size, + CreateAt: repo.CreateAt, + UpdateAt: repo.UpdateAt, + Url: repo.Url, + Type: "dir", + Path: fmt.Sprintf("%s/%s", path, nextDir), + }) + } + } + } + + return utils.SliceConvert(files, func(src File) (model.Obj, error) { + return src, nil + }) +} + +func (d *GithubReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + link := model.Link{ + URL: file.GetID(), + Header: http.Header{}, + } + return &link, nil +} + +func (d *GithubReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + return nil, errs.NotImplement +} + +func (d *GithubReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + return nil, errs.NotImplement +} + +func (d *GithubReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + return nil, errs.NotImplement +} + +func (d *GithubReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + return nil, errs.NotImplement +} + +func (d *GithubReleases) Remove(ctx context.Context, obj model.Obj) error { + return 
errs.NotImplement +} + +func (d *GithubReleases) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + return nil, errs.NotImplement +} + +var _ driver.Driver = (*GithubReleases)(nil) diff --git a/drivers/github_releases/meta.go b/drivers/github_releases/meta.go new file mode 100644 index 00000000..ca6ca5dc --- /dev/null +++ b/drivers/github_releases/meta.go @@ -0,0 +1,34 @@ +package github_releases + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + driver.RootID + RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"/path/to/alist-gh:alistGo/alist\n/path/to2/alist-web-gh:AlistGo/alist-web" help:"structure:[path:]org/repo"` + ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README、LICENSE file"` + Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"` + ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"` +} + +var config = driver.Config{ + Name: "GitHub Releases", + LocalSort: false, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: false, + NeedMs: false, + DefaultRoot: "", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &GithubReleases{} + }) +} diff --git a/drivers/github_releases/types.go b/drivers/github_releases/types.go new file mode 100644 index 00000000..733460dc --- /dev/null +++ b/drivers/github_releases/types.go @@ -0,0 +1,68 @@ +package github_releases + +import ( + "time" + + "github.com/alist-org/alist/v3/pkg/utils" +) + +type File struct { + FileName string `json:"name"` + Size int64 `json:"size"` + CreateAt time.Time `json:"time"` + UpdateAt time.Time `json:"chtime"` + Url string `json:"url"` + Type 
string `json:"type"` + Path string `json:"path"` +} + +func (f File) GetHash() utils.HashInfo { + return utils.HashInfo{} +} + +func (f File) GetPath() string { + return f.Path +} + +func (f File) GetSize() int64 { + return f.Size +} + +func (f File) GetName() string { + return f.FileName +} + +func (f File) ModTime() time.Time { + return f.UpdateAt +} + +func (f File) CreateTime() time.Time { + return f.CreateAt +} + +func (f File) IsDir() bool { + return f.Type == "dir" +} + +func (f File) GetID() string { + return f.Url +} + +func (f File) Thumb() string { + return "" +} + +type ReleasesData struct { + Files []File `json:"files"` + Size int64 `json:"size"` + UpdateAt time.Time `json:"chtime"` + CreateAt time.Time `json:"time"` + Url string `json:"url"` +} + +type Release struct { + Path string // 挂载路径 + RepoName string // 仓库名称 + Version string // 版本号, tag + ID string // 版本ID +} diff --git a/drivers/github_releases/util.go b/drivers/github_releases/util.go new file mode 100644 index 00000000..b2d79c0b --- /dev/null +++ b/drivers/github_releases/util.go @@ -0,0 +1,217 @@ +package github_releases + +import ( + "fmt" + "regexp" + "strings" + "sync" + "time" + + "github.com/alist-org/alist/v3/drivers/base" + "github.com/go-resty/resty/v2" + jsoniter "github.com/json-iterator/go" + log "github.com/sirupsen/logrus" +) + +var ( + cache = make(map[string]*resty.Response) + created = make(map[string]time.Time) + mu sync.Mutex + req *resty.Request +) + +// 解析仓库列表 +func ParseRepos(text string, allVersion bool) ([]Release, error) { + lines := strings.Split(text, "\n") + var repos []Release + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + parts := strings.Split(line, ":") + path, repo := "", "" + if len(parts) == 1 { + path = "/" + repo = parts[0] + } else if len(parts) == 2 { + path = fmt.Sprintf("/%s", strings.Trim(parts[0], "/")) + repo = parts[1] + } else { + return nil, fmt.Errorf("invalid format: %s", line) + } + + if 
allVersion { + releases, _ := GetAllVersion(repo, path) + repos = append(repos, *releases...) + } else { + repos = append(repos, Release{ + Path: path, + RepoName: repo, + Version: "latest", + ID: "latest", + }) + } + + } + return repos, nil +} + +// 获取下一级目录 +func GetNextDir(wholePath string, basePath string) string { + if !strings.HasSuffix(basePath, "/") { + basePath += "/" + } + if !strings.HasPrefix(wholePath, basePath) { + return "" + } + remainingPath := strings.TrimLeft(strings.TrimPrefix(wholePath, basePath), "/") + if remainingPath != "" { + parts := strings.Split(remainingPath, "/") + return parts[0] + } + return "" +} + +// 发送 GET 请求 +func GetRequest(url string, cacheExpiration int) (*resty.Response, error) { + mu.Lock() + if res, ok := cache[url]; ok && time.Now().Before(created[url].Add(time.Duration(cacheExpiration)*time.Minute)) { + mu.Unlock() + return res, nil + } + mu.Unlock() + + res, err := req.Get(url) + if err != nil { + return nil, err + } + if res.StatusCode() != 200 { + log.Warn("failed to get request: ", res.StatusCode(), res.String()) + } + + mu.Lock() + cache[url] = res + created[url] = time.Now() + mu.Unlock() + + return res, nil +} + +// 获取 README、LICENSE 等文件 +func GetGithubOtherFile(repo string, basePath string, cacheExpiration int) (*[]File, error) { + url := fmt.Sprintf("https://api.github.com/repos/%s/contents/", strings.Trim(repo, "/")) + res, _ := GetRequest(url, cacheExpiration) + body := jsoniter.Get(res.Body()) + var files []File + for i := 0; i < body.Size(); i++ { + filename := body.Get(i, "name").ToString() + + re := regexp.MustCompile(`(?i)^(.*\.md|LICENSE)$`) + + if !re.MatchString(filename) { + continue + } + + files = append(files, File{ + FileName: filename, + Size: body.Get(i, "size").ToInt64(), + CreateAt: time.Time{}, + UpdateAt: time.Now(), + Url: body.Get(i, "download_url").ToString(), + Type: body.Get(i, "type").ToString(), + Path: fmt.Sprintf("%s/%s", basePath, filename), + }) + } + return &files, nil +} + +// 
获取 GitHub Release 详细信息 +func GetRepoReleaseInfo(repo string, version string, basePath string, cacheExpiration int) (*ReleasesData, error) { + url := fmt.Sprintf("https://api.github.com/repos/%s/releases/%s", strings.Trim(repo, "/"), version) + res, _ := GetRequest(url, cacheExpiration) + body := res.Body() + + if jsoniter.Get(res.Body(), "status").ToInt64() != 0 { + return &ReleasesData{}, fmt.Errorf("%s", res.String()) + } + + assets := jsoniter.Get(res.Body(), "assets") + var files []File + + for i := 0; i < assets.Size(); i++ { + filename := assets.Get(i, "name").ToString() + + files = append(files, File{ + FileName: filename, + Size: assets.Get(i, "size").ToInt64(), + Url: assets.Get(i, "browser_download_url").ToString(), + Type: assets.Get(i, "content_type").ToString(), + Path: fmt.Sprintf("%s/%s", basePath, filename), + + CreateAt: func() time.Time { + t, _ := time.Parse(time.RFC3339, assets.Get(i, "created_at").ToString()) + return t + }(), + UpdateAt: func() time.Time { + t, _ := time.Parse(time.RFC3339, assets.Get(i, "updated_at").ToString()) + return t + }(), + }) + } + + return &ReleasesData{ + Files: files, + Url: jsoniter.Get(body, "html_url").ToString(), + + Size: func() int64 { + size := int64(0) + for _, file := range files { + size += file.Size + } + return size + }(), + UpdateAt: func() time.Time { + t, _ := time.Parse(time.RFC3339, jsoniter.Get(body, "published_at").ToString()) + return t + }(), + CreateAt: func() time.Time { + t, _ := time.Parse(time.RFC3339, jsoniter.Get(body, "created_at").ToString()) + return t + }(), + }, nil +} + +// 获取所有的版本号 +func GetAllVersion(repo string, path string) (*[]Release, error) { + url := fmt.Sprintf("https://api.github.com/repos/%s/releases", strings.Trim(repo, "/")) + res, _ := GetRequest(url, 0) + body := jsoniter.Get(res.Body()) + releases := make([]Release, 0) + for i := 0; i < body.Size(); i++ { + version := body.Get(i, "tag_name").ToString() + releases = append(releases, Release{ + Path: 
fmt.Sprintf("%s/%s", path, version), + Version: version, + RepoName: repo, + ID: body.Get(i, "id").ToString(), + }) + } + return &releases, nil +} + +func ClearCache() { + mu.Lock() + cache = make(map[string]*resty.Response) + created = make(map[string]time.Time) + mu.Unlock() +} + +func SetHeader(token string) { + req = base.RestyClient.R() + if token != "" { + req.SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)) + } + req.SetHeader("Accept", "application/vnd.github+json") + req.SetHeader("X-GitHub-Api-Version", "2022-11-28") +} From fd51f34efa70005ffd69378ddb05183f405911ee Mon Sep 17 00:00:00 2001 From: Snowykami Date: Mon, 27 Jan 2025 20:47:52 +0800 Subject: [PATCH 100/187] feat(misskey): add misskey driver (#7864) --- drivers/all.go | 1 + drivers/misskey/driver.go | 74 +++++++++++ drivers/misskey/meta.go | 35 ++++++ drivers/misskey/types.go | 35 ++++++ drivers/misskey/util.go | 256 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 401 insertions(+) create mode 100644 drivers/misskey/driver.go create mode 100644 drivers/misskey/meta.go create mode 100644 drivers/misskey/types.go create mode 100644 drivers/misskey/util.go diff --git a/drivers/all.go b/drivers/all.go index bd051168..2746e1bf 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -37,6 +37,7 @@ import ( _ "github.com/alist-org/alist/v3/drivers/local" _ "github.com/alist-org/alist/v3/drivers/mediatrack" _ "github.com/alist-org/alist/v3/drivers/mega" + _ "github.com/alist-org/alist/v3/drivers/misskey" _ "github.com/alist-org/alist/v3/drivers/mopan" _ "github.com/alist-org/alist/v3/drivers/netease_music" _ "github.com/alist-org/alist/v3/drivers/onedrive" diff --git a/drivers/misskey/driver.go b/drivers/misskey/driver.go new file mode 100644 index 00000000..29797a01 --- /dev/null +++ b/drivers/misskey/driver.go @@ -0,0 +1,74 @@ +package misskey + +import ( + "context" + "strings" + + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + 
"github.com/alist-org/alist/v3/internal/model" +) + +type Misskey struct { + model.Storage + Addition +} + +func (d *Misskey) Config() driver.Config { + return config +} + +func (d *Misskey) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *Misskey) Init(ctx context.Context) error { + d.Endpoint = strings.TrimSuffix(d.Endpoint, "/") + if d.Endpoint == "" || d.AccessToken == "" { + return errs.EmptyToken + } else { + return nil + } +} + +func (d *Misskey) Drop(ctx context.Context) error { + return nil +} + +func (d *Misskey) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + return d.list(dir) +} + +func (d *Misskey) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + return d.link(file) +} + +func (d *Misskey) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + return d.makeDir(parentDir, dirName) +} + +func (d *Misskey) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + return d.move(srcObj, dstDir) +} + +func (d *Misskey) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + return d.rename(srcObj, newName) +} + +func (d *Misskey) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + return d.copy(srcObj, dstDir) +} + +func (d *Misskey) Remove(ctx context.Context, obj model.Obj) error { + return d.remove(obj) +} + +func (d *Misskey) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + return d.put(dstDir, stream, up) +} + +//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { +// return nil, errs.NotSupport +//} + +var _ driver.Driver = (*Misskey)(nil) diff --git a/drivers/misskey/meta.go b/drivers/misskey/meta.go new file mode 100644 index 00000000..b8a80c15 --- /dev/null +++ b/drivers/misskey/meta.go @@ -0,0 +1,35 @@ +package misskey + +import ( + 
"github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + // Usually one of two + driver.RootPath + // define other + // Field string `json:"field" type:"select" required:"true" options:"a,b,c" default:"a"` + Endpoint string `json:"endpoint" required:"true" default:"https://misskey.io"` + AccessToken string `json:"access_token" required:"true"` +} + +var config = driver.Config{ + Name: "Misskey", + LocalSort: false, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: false, + NeedMs: false, + DefaultRoot: "/", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &Misskey{} + }) +} diff --git a/drivers/misskey/types.go b/drivers/misskey/types.go new file mode 100644 index 00000000..e9adc8d2 --- /dev/null +++ b/drivers/misskey/types.go @@ -0,0 +1,35 @@ +package misskey + +type Resp struct { + Code int + Raw []byte +} + +type Properties struct { + Width int `json:"width"` + Height int `json:"height"` +} + +type MFile struct { + ID string `json:"id"` + CreatedAt string `json:"createdAt"` + Name string `json:"name"` + Type string `json:"type"` + MD5 string `json:"md5"` + Size int64 `json:"size"` + IsSensitive bool `json:"isSensitive"` + Blurhash string `json:"blurhash"` + Properties Properties `json:"properties"` + URL string `json:"url"` + ThumbnailURL string `json:"thumbnailUrl"` + Comment *string `json:"comment"` + FolderID *string `json:"folderId"` + Folder MFolder `json:"folder"` +} + +type MFolder struct { + ID string `json:"id"` + CreatedAt string `json:"createdAt"` + Name string `json:"name"` + ParentID *string `json:"parentId"` +} diff --git a/drivers/misskey/util.go b/drivers/misskey/util.go new file mode 100644 index 00000000..4d5a3b4d --- /dev/null +++ b/drivers/misskey/util.go @@ -0,0 +1,256 @@ +package misskey + +import ( + "bytes" + "context" + "errors" + "io" + "time" + + 
"github.com/go-resty/resty/v2" + + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" +) + +// Base layer methods + +func (d *Misskey) request(path, method string, callback base.ReqCallback, resp interface{}) error { + url := d.Endpoint + "/api/drive" + path + req := base.RestyClient.R() + + req.SetAuthToken(d.AccessToken).SetHeader("Content-Type", "application/json") + + if callback != nil { + callback(req) + } else { + req.SetBody("{}") + } + + req.SetResult(resp) + + // 启用调试模式 + req.EnableTrace() + + response, err := req.Execute(method, url) + if err != nil { + return err + } + if !response.IsSuccess() { + return errors.New(response.String()) + } + return nil +} + +func (d *Misskey) getThumb(ctx context.Context, obj model.Obj) (io.Reader, error) { + // TODO return the thumb of obj, optional + return nil, errs.NotImplement +} + +func setBody(body interface{}) base.ReqCallback { + return func(req *resty.Request) { + req.SetBody(body) + } +} + +func handleFolderId(dir model.Obj) interface{} { + if dir.GetID() == "" { + return nil + } + return dir.GetID() +} + +// API layer methods + +func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) { + var files []MFile + var body map[string]string + if dir.GetPath() != "/" { + body = map[string]string{"folderId": dir.GetID()} + } else { + body = map[string]string{} + } + err := d.request("/files", "POST", setBody(body), &files) + if err != nil { + return []model.Obj{}, err + } + return utils.SliceConvert(files, func(src MFile) (model.Obj, error) { + return mFile2Object(src), nil + }) +} + +func (d *Misskey) getFolders(dir model.Obj) ([]model.Obj, error) { + var folders []MFolder + var body map[string]string + if dir.GetPath() != "/" { + body = map[string]string{"folderId": dir.GetID()} + } else { + body = map[string]string{} + } + err 
:= d.request("/folders", "POST", setBody(body), &folders) + if err != nil { + return []model.Obj{}, err + } + return utils.SliceConvert(folders, func(src MFolder) (model.Obj, error) { + return mFolder2Object(src), nil + }) +} + +func (d *Misskey) list(dir model.Obj) ([]model.Obj, error) { + files, _ := d.getFiles(dir) + folders, _ := d.getFolders(dir) + return append(files, folders...), nil +} + +func (d *Misskey) link(file model.Obj) (*model.Link, error) { + var mFile MFile + err := d.request("/files/show", "POST", setBody(map[string]string{"fileId": file.GetID()}), &mFile) + if err != nil { + return nil, err + } + return &model.Link{ + URL: mFile.URL, + }, nil +} + +func (d *Misskey) makeDir(parentDir model.Obj, dirName string) (model.Obj, error) { + var folder MFolder + err := d.request("/folders/create", "POST", setBody(map[string]interface{}{"parentId": handleFolderId(parentDir), "name": dirName}), &folder) + if err != nil { + return nil, err + } + return mFolder2Object(folder), nil +} + +func (d *Misskey) move(srcObj, dstDir model.Obj) (model.Obj, error) { + if srcObj.IsDir() { + var folder MFolder + err := d.request("/folders/update", "POST", setBody(map[string]interface{}{"folderId": srcObj.GetID(), "parentId": handleFolderId(dstDir)}), &folder) + return mFolder2Object(folder), err + } else { + var file MFile + err := d.request("/files/update", "POST", setBody(map[string]interface{}{"fileId": srcObj.GetID(), "folderId": handleFolderId(dstDir)}), &file) + return mFile2Object(file), err + } +} + +func (d *Misskey) rename(srcObj model.Obj, newName string) (model.Obj, error) { + if srcObj.IsDir() { + var folder MFolder + err := d.request("/folders/update", "POST", setBody(map[string]string{"folderId": srcObj.GetID(), "name": newName}), &folder) + return mFolder2Object(folder), err + } else { + var file MFile + err := d.request("/files/update", "POST", setBody(map[string]string{"fileId": srcObj.GetID(), "name": newName}), &file) + return mFile2Object(file), err 
+ } +} + +func (d *Misskey) copy(srcObj, dstDir model.Obj) (model.Obj, error) { + if srcObj.IsDir() { + folder, err := d.makeDir(dstDir, srcObj.GetName()) + if err != nil { + return nil, err + } + list, err := d.list(srcObj) + if err != nil { + return nil, err + } + for _, obj := range list { + _, err := d.copy(obj, folder) + if err != nil { + return nil, err + } + } + return folder, nil + } else { + var file MFile + url, err := d.link(srcObj) + if err != nil { + return nil, err + } + err = d.request("/files/upload-from-url", "POST", setBody(map[string]interface{}{"url": url.URL, "folderId": handleFolderId(dstDir)}), &file) + if err != nil { + return nil, err + } + return mFile2Object(file), nil + } +} + +func (d *Misskey) remove(obj model.Obj) error { + if obj.IsDir() { + err := d.request("/folders/delete", "POST", setBody(map[string]string{"folderId": obj.GetID()}), nil) + return err + } else { + err := d.request("/files/delete", "POST", setBody(map[string]string{"fileId": obj.GetID()}), nil) + return err + } +} + +func (d *Misskey) put(dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + var file MFile + + fileContent, err := io.ReadAll(stream) + if err != nil { + return nil, err + } + + req := base.RestyClient.R(). + SetFileReader("file", stream.GetName(), io.NopCloser(bytes.NewReader(fileContent))). + SetFormData(map[string]string{ + "folderId": handleFolderId(dstDir).(string), + "name": stream.GetName(), + "comment": "", + "isSensitive": "false", + "force": "false", + }). 
+ SetResult(&file).SetAuthToken(d.AccessToken) + + resp, err := req.Post(d.Endpoint + "/api/drive/files/create") + if err != nil { + return nil, err + } + if !resp.IsSuccess() { + return nil, errors.New(resp.String()) + } + + return mFile2Object(file), nil +} + +func mFile2Object(file MFile) *model.ObjThumbURL { + ctime, err := time.Parse(time.RFC3339, file.CreatedAt) + if err != nil { + ctime = time.Time{} + } + return &model.ObjThumbURL{ + Object: model.Object{ + ID: file.ID, + Name: file.Name, + Ctime: ctime, + IsFolder: false, + Size: file.Size, + }, + Thumbnail: model.Thumbnail{ + Thumbnail: file.ThumbnailURL, + }, + Url: model.Url{ + Url: file.URL, + }, + } +} + +func mFolder2Object(folder MFolder) *model.Object { + ctime, err := time.Parse(time.RFC3339, folder.CreatedAt) + if err != nil { + ctime = time.Time{} + } + return &model.Object{ + ID: folder.ID, + Name: folder.Name, + Ctime: ctime, + IsFolder: true, + } +} From 027edcbe536230c4607f1d73ccff85c666ed146c Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Mon, 27 Jan 2025 20:49:24 +0800 Subject: [PATCH 101/187] refactor(patch): execute all patches in dev version (#7807) --- internal/bootstrap/patch.go | 6 ++++++ internal/bootstrap/patch/v3_41_0/grant_permission.go | 11 ++++------- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/internal/bootstrap/patch.go b/internal/bootstrap/patch.go index 2d22d1b6..5c7ca758 100644 --- a/internal/bootstrap/patch.go +++ b/internal/bootstrap/patch.go @@ -2,6 +2,7 @@ package bootstrap import ( "fmt" + "github.com/alist-org/alist/v3/internal/bootstrap/patch" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/pkg/utils" @@ -40,6 +41,11 @@ func compareVersion(majorA, minorA, patchNumA, majorB, minorB, patchNumB int) bo func InitUpgradePatch() { if !strings.HasPrefix(conf.Version, "v") { + for _, vp := range patch.UpgradePatches { + for i, p := range vp.Patches { + safeCall(vp.Version, i, p) + 
} + } return } if LastLaunchedVersion == conf.Version { diff --git a/internal/bootstrap/patch/v3_41_0/grant_permission.go b/internal/bootstrap/patch/v3_41_0/grant_permission.go index e62d1e8f..60d8ab4f 100644 --- a/internal/bootstrap/patch/v3_41_0/grant_permission.go +++ b/internal/bootstrap/patch/v3_41_0/grant_permission.go @@ -11,14 +11,11 @@ import ( // PR AlistGo/alist#7817. func GrantAdminPermissions() { admin, err := op.GetAdmin() + if err == nil && (admin.Permission & 0x33FF) == 0 { + admin.Permission |= 0x33FF + err = op.UpdateUser(admin) + } if err != nil { utils.Log.Errorf("Cannot grant permissions to admin: %v", err) } - if (admin.Permission & 0x33FF) == 0 { - admin.Permission |= 0x33FF - err = op.UpdateUser(admin) - if err != nil { - utils.Log.Errorf("Cannot grant permissions to admin: %v", err) - } - } } From 226c34929a8bf7154bcf6c9f701c78da356c7ce5 Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Mon, 27 Jan 2025 20:59:58 +0800 Subject: [PATCH 102/187] feat(ci): add build info for beta release --- .github/workflows/beta_release.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/beta_release.yml b/.github/workflows/beta_release.yml index c9cb7475..3c52b4c4 100644 --- a/.github/workflows/beta_release.yml +++ b/.github/workflows/beta_release.yml @@ -87,12 +87,18 @@ jobs: run: bash build.sh dev web - name: Build - id: test-action uses: go-cross/cgo-actions@v1 with: targets: ${{ matrix.target }} musl-target-format: $os-$musl-$arch out-dir: build + x-flags: | + github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at + github.com/alist-org/alist/v3/internal/conf.GoVersion=$go_version + github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe + github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit + github.com/alist-org/alist/v3/internal/conf.Version=$tag + github.com/alist-org/alist/v3/internal/conf.WebVersion=dev - name: Compress run: | From f88fd83d4ac3372076215abf6fc2ccabde679d2b Mon Sep 17 
00:00:00 2001 From: Andy Hsu Date: Tue, 28 Jan 2025 18:55:56 +0800 Subject: [PATCH 103/187] feat(ci): use `go-cross/cgo-actions` for dev build --- .github/workflows/build.yml | 44 ++++++++++++++++++++++++------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b059a20b..fe037f43 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,14 +15,17 @@ jobs: strategy: matrix: platform: [ubuntu-latest] - go-version: [ '1.21' ] + target: + - darwin-amd64 + - darwin-arm64 + - windows-amd64 + - linux-arm64-musl + - linux-amd64-musl + - windows-arm64 + - android-arm64 name: Build runs-on: ${{ matrix.platform }} steps: - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: ${{ matrix.go-version }} - name: Checkout uses: actions/checkout@v4 @@ -30,19 +33,30 @@ jobs: - uses: benjlevesque/short-sha@v3.0 id: short-sha - - name: Install dependencies - run: | - sudo snap install zig --classic --beta - docker pull crazymax/xgo:latest - go install github.com/crazy-max/xgo@latest - sudo apt install upx + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '1.22' + + - name: Setup web + run: bash build.sh dev web - name: Build - run: | - bash build.sh dev + uses: go-cross/cgo-actions@v1 + with: + targets: ${{ matrix.target }} + musl-target-format: $os-$musl-$arch + out-dir: build + x-flags: | + github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at + github.com/alist-org/alist/v3/internal/conf.GoVersion=$go_version + github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe + github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit + github.com/alist-org/alist/v3/internal/conf.Version=$tag + github.com/alist-org/alist/v3/internal/conf.WebVersion=dev - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: alist_${{ env.SHA }} - path: dist \ No newline at end of file + name: alist_${{ env.SHA }}_${{ matrix.target }} + 
path: build/* \ No newline at end of file From d53eecc2292e84681e8a8e8641125b52f8c88954 Mon Sep 17 00:00:00 2001 From: Jiang Xiang <869914918@qq.com> Date: Thu, 30 Jan 2025 11:24:07 +0800 Subject: [PATCH 104/187] fix(febbox): panic due to slice out of range (#7898 close #7889) --- drivers/febbox/util.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/febbox/util.go b/drivers/febbox/util.go index ac072edb..ad2efe07 100644 --- a/drivers/febbox/util.go +++ b/drivers/febbox/util.go @@ -3,6 +3,7 @@ package febbox import ( "encoding/json" "errors" + "fmt" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/op" "github.com/go-resty/resty/v2" @@ -135,6 +136,9 @@ func (d *FebBox) getDownloadLink(id string, ip string) (string, error) { if err = json.Unmarshal(res, &fileDownloadResp); err != nil { return "", err } + if len(fileDownloadResp.Data) == 0 { + return "", fmt.Errorf("can not get download link, code:%d, msg:%s", fileDownloadResp.Code, fileDownloadResp.Msg) + } return fileDownloadResp.Data[0].DownloadURL, nil } From b9f397d29f0a4e75c72564da40d0f297ed8c5626 Mon Sep 17 00:00:00 2001 From: abc1763613206 Date: Thu, 30 Jan 2025 11:25:41 +0800 Subject: [PATCH 105/187] fix(139): restore the `Account` handling, partially reverts #7850 (#7900 close #7784) --- drivers/139/driver.go | 47 ++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/drivers/139/driver.go b/drivers/139/driver.go index cf64a8fd..1e2ba9c4 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -2,11 +2,13 @@ package _139 import ( "context" + "encoding/base64" "fmt" "io" "net/http" "path" "strconv" + "strings" "time" "github.com/alist-org/alist/v3/drivers/base" @@ -69,29 +71,28 @@ func (d *Yun139) Init(ctx context.Context) error { default: return errs.NotImplement } - // if d.ref != nil { - // return nil - // } - // decode, err := base64.StdEncoding.DecodeString(d.Authorization) - // if err != nil { 
- // return err - // } - // decodeStr := string(decode) - // splits := strings.Split(decodeStr, ":") - // if len(splits) < 2 { - // return fmt.Errorf("authorization is invalid, splits < 2") - // } - // d.Account = splits[1] - // _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{ - // "qryUserExternInfoReq": base.Json{ - // "commonAccountInfo": base.Json{ - // "account": d.getAccount(), - // "accountType": 1, - // }, - // }, - // }, nil) - // return err - return nil + if d.ref != nil { + return nil + } + decode, err := base64.StdEncoding.DecodeString(d.Authorization) + if err != nil { + return err + } + decodeStr := string(decode) + splits := strings.Split(decodeStr, ":") + if len(splits) < 2 { + return fmt.Errorf("authorization is invalid, splits < 2") + } + d.Account = splits[1] + _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{ + "qryUserExternInfoReq": base.Json{ + "commonAccountInfo": base.Json{ + "account": d.getAccount(), + "accountType": 1, + }, + }, + }, nil) + return err } func (d *Yun139) InitReference(storage driver.Driver) error { From 779c293f04a387cfef210b83632aeeb7c5fb69de Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Sat, 1 Feb 2025 17:29:55 +0800 Subject: [PATCH 106/187] fix(driver): implement canceling and updating progress for putting for some drivers (#7847) * fix(driver): additionally implement canceling and updating progress for putting for some drivers * refactor: add driver archive api into template * fix(123): use built-in MD5 to avoid caching full * . 
* fix build failed --- drivers/115/driver.go | 4 +- drivers/115/util.go | 31 ++++++-- drivers/123/driver.go | 58 ++++++++------ drivers/123/upload.go | 4 +- drivers/alist_v3/driver.go | 18 +++-- drivers/chaoxing/driver.go | 16 +++- drivers/ftp/driver.go | 14 +++- drivers/github/driver.go | 17 +++-- drivers/github/util.go | 16 ---- drivers/ilanzou/driver.go | 36 +++++---- drivers/ipfs_api/driver.go | 11 ++- drivers/kodbox/driver.go | 16 ++-- drivers/lanzou/driver.go | 10 ++- drivers/mediatrack/driver.go | 25 +++--- drivers/netease_music/driver.go | 2 +- drivers/netease_music/types.go | 16 ++++ drivers/netease_music/upload.go | 13 +++- drivers/netease_music/util.go | 32 ++++++-- drivers/pikpak/driver.go | 4 +- drivers/pikpak/util.go | 32 ++++++-- drivers/quqi/driver.go | 13 +++- drivers/s3/driver.go | 19 +++-- drivers/seafile/driver.go | 12 ++- drivers/template/driver.go | 24 +++++- drivers/thunder/driver.go | 22 +++--- drivers/thunderx/driver.go | 22 +++--- drivers/trainbit/driver.go | 18 ++--- drivers/trainbit/util.go | 11 --- drivers/uss/driver.go | 14 +++- drivers/webdav/driver.go | 16 ++-- drivers/weiyun/driver.go | 130 +++++++++++++++++--------------- drivers/wopan/driver.go | 1 + drivers/yandex_disk/driver.go | 16 ++-- internal/driver/driver.go | 6 +- internal/stream/stream.go | 14 ++++ 35 files changed, 457 insertions(+), 256 deletions(-) diff --git a/drivers/115/driver.go b/drivers/115/driver.go index 0bf8a927..0dcb64d8 100644 --- a/drivers/115/driver.go +++ b/drivers/115/driver.go @@ -215,12 +215,12 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr var uploadResult *UploadResult // 闪传失败,上传 if stream.GetSize() <= 10*utils.MB { // 文件大小小于10MB,改用普通模式上传 - if uploadResult, err = d.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID); err != nil { + if uploadResult, err = d.UploadByOSS(ctx, &fastInfo.UploadOSSParams, stream, dirID, up); err != nil { return nil, err } } else { // 分片上传 - if uploadResult, err = 
d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID); err != nil { + if uploadResult, err = d.UploadByMultipart(ctx, &fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID, up); err != nil { return nil, err } } diff --git a/drivers/115/util.go b/drivers/115/util.go index 84cbd88f..4d3cdd93 100644 --- a/drivers/115/util.go +++ b/drivers/115/util.go @@ -2,17 +2,21 @@ package _115 import ( "bytes" + "context" "crypto/md5" "crypto/tls" "encoding/hex" "encoding/json" "fmt" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" "net/url" "strconv" "strings" "sync" + "sync/atomic" "time" "github.com/alist-org/alist/v3/internal/conf" @@ -271,7 +275,7 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri } // UploadByOSS use aliyun sdk to upload -func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dirID string) (*UploadResult, error) { +func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSParams, s model.FileStreamer, dirID string, up driver.UpdateProgress) (*UploadResult, error) { ossToken, err := c.client.GetOSSToken() if err != nil { return nil, err @@ -286,6 +290,13 @@ func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dir } var bodyBytes []byte + r := &stream.ReaderWithCtx{ + Reader: &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }, + Ctx: ctx, + } if err = bucket.PutObject(params.Object, r, append( driver115.OssOption(params, ossToken), oss.CallbackResult(&bodyBytes), @@ -301,7 +312,8 @@ func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dir } // UploadByMultipart upload by mutipart blocks -func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) (*UploadResult, error) { +func (d *Pan115) UploadByMultipart(ctx 
context.Context, params *driver115.UploadOSSParams, fileSize int64, s model.FileStreamer, + dirID string, up driver.UpdateProgress, opts ...driver115.UploadMultipartOption) (*UploadResult, error) { var ( chunks []oss.FileChunk parts []oss.UploadPart @@ -313,7 +325,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i err error ) - tmpF, err := stream.CacheFullInTempFile() + tmpF, err := s.CacheFullInTempFile() if err != nil { return nil, err } @@ -372,6 +384,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i quit <- struct{}{} }() + completedNum := atomic.Int32{} // consumers for i := 0; i < options.ThreadsNum; i++ { go func(threadId int) { @@ -384,6 +397,8 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i var part oss.UploadPart // 出现错误就继续尝试,共尝试3次 for retry := 0; retry < 3; retry++ { select { + case <-ctx.Done(): + break case <-ticker.C: if ossToken, err = d.client.GetOSSToken(); err != nil { // 到时重新获取ossToken errCh <- errors.Wrap(err, "刷新token时出现错误") @@ -396,12 +411,18 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i continue } - if part, err = bucket.UploadPart(imur, bytes.NewBuffer(buf), chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil { + if part, err = bucket.UploadPart(imur, &stream.ReaderWithCtx{ + Reader: bytes.NewBuffer(buf), + Ctx: ctx, + }, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil { break } } if err != nil { - errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err)) + errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", s.GetName(), chunk.Number, err)) + } else { + num := completedNum.Add(1) + up(float64(num) * 100.0 / float64(len(chunks))) } UploadedPartsCh <- part } diff --git a/drivers/123/driver.go b/drivers/123/driver.go index 3828a59d..1bf71ae6 100644 --- a/drivers/123/driver.go +++ 
b/drivers/123/driver.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "encoding/hex" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" "net/url" @@ -185,32 +186,35 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error { } } -func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - // const DEFAULT int64 = 10485760 - h := md5.New() - // need to calculate md5 of the full content - tempFile, err := stream.CacheFullInTempFile() - if err != nil { - return err +func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + etag := file.GetHash().GetHash(utils.MD5) + if len(etag) < utils.MD5.Width { + // const DEFAULT int64 = 10485760 + h := md5.New() + // need to calculate md5 of the full content + tempFile, err := file.CacheFullInTempFile() + if err != nil { + return err + } + defer func() { + _ = tempFile.Close() + }() + if _, err = utils.CopyWithBuffer(h, tempFile); err != nil { + return err + } + _, err = tempFile.Seek(0, io.SeekStart) + if err != nil { + return err + } + etag = hex.EncodeToString(h.Sum(nil)) } - defer func() { - _ = tempFile.Close() - }() - if _, err = utils.CopyWithBuffer(h, tempFile); err != nil { - return err - } - _, err = tempFile.Seek(0, io.SeekStart) - if err != nil { - return err - } - etag := hex.EncodeToString(h.Sum(nil)) data := base.Json{ "driveId": 0, "duplicate": 2, // 2->覆盖 1->重命名 0->默认 "etag": etag, - "fileName": stream.GetName(), + "fileName": file.GetName(), "parentFileId": dstDir.GetID(), - "size": stream.GetSize(), + "size": file.GetSize(), "type": 0, } var resp UploadResp @@ -225,7 +229,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr return nil } if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" { - err = d.newUpload(ctx, &resp, stream, tempFile, up) + err = d.newUpload(ctx, &resp, file, 
up) return err } else { cfg := &aws.Config{ @@ -239,15 +243,21 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr return err } uploader := s3manager.NewUploader(s) - if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { - uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1) + if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { + uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1) } input := &s3manager.UploadInput{ Bucket: &resp.Data.Bucket, Key: &resp.Data.Key, - Body: tempFile, + Body: &stream.ReaderUpdatingProgress{ + Reader: file, + UpdateProgress: up, + }, } _, err = uploader.UploadWithContext(ctx, input) + if err != nil { + return err + } } _, err = d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ diff --git a/drivers/123/upload.go b/drivers/123/upload.go index 66627b4c..a472df55 100644 --- a/drivers/123/upload.go +++ b/drivers/123/upload.go @@ -69,7 +69,7 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F return err } -func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error { +func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error { chunkSize := int64(1024 * 1024 * 16) // fetch s3 pre signed urls chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize))) @@ -103,7 +103,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi if j == chunkCount { curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize } - err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl) + err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(file, chunkSize), curSize, false, getS3UploadUrl) if err != nil { 
return err } diff --git a/drivers/alist_v3/driver.go b/drivers/alist_v3/driver.go index 894bac64..679285e0 100644 --- a/drivers/alist_v3/driver.go +++ b/drivers/alist_v3/driver.go @@ -3,6 +3,7 @@ package alist_v3 import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" "path" @@ -181,25 +182,28 @@ func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error { return err } -func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", stream) +func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { + req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) if err != nil { return err } req.Header.Set("Authorization", d.Token) - req.Header.Set("File-Path", path.Join(dstDir.GetPath(), stream.GetName())) + req.Header.Set("File-Path", path.Join(dstDir.GetPath(), s.GetName())) req.Header.Set("Password", d.MetaPassword) - if md5 := stream.GetHash().GetHash(utils.MD5); len(md5) > 0 { + if md5 := s.GetHash().GetHash(utils.MD5); len(md5) > 0 { req.Header.Set("X-File-Md5", md5) } - if sha1 := stream.GetHash().GetHash(utils.SHA1); len(sha1) > 0 { + if sha1 := s.GetHash().GetHash(utils.SHA1); len(sha1) > 0 { req.Header.Set("X-File-Sha1", sha1) } - if sha256 := stream.GetHash().GetHash(utils.SHA256); len(sha256) > 0 { + if sha256 := s.GetHash().GetHash(utils.SHA256); len(sha256) > 0 { req.Header.Set("X-File-Sha256", sha256) } - req.ContentLength = stream.GetSize() + req.ContentLength = s.GetSize() // client := base.NewHttpClient() // client.Timeout = time.Hour * 6 res, err := base.HttpClient.Do(req) diff --git a/drivers/chaoxing/driver.go b/drivers/chaoxing/driver.go index 360c6e3d..9b526f8a 100644 --- a/drivers/chaoxing/driver.go +++ 
b/drivers/chaoxing/driver.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "io" "mime/multipart" "net/http" @@ -215,7 +216,7 @@ func (d *ChaoXing) Remove(ctx context.Context, obj model.Obj) error { return nil } -func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { var resp UploadDataRsp _, err := d.request("https://noteyd.chaoxing.com/pc/files/getUploadConfig", http.MethodGet, func(req *resty.Request) { }, &resp) @@ -227,11 +228,11 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS } body := &bytes.Buffer{} writer := multipart.NewWriter(body) - filePart, err := writer.CreateFormFile("file", stream.GetName()) + filePart, err := writer.CreateFormFile("file", file.GetName()) if err != nil { return err } - _, err = utils.CopyWithBuffer(filePart, stream) + _, err = utils.CopyWithBuffer(filePart, file) if err != nil { return err } @@ -248,7 +249,14 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS if err != nil { return err } - req, err := http.NewRequest("POST", "https://pan-yz.chaoxing.com/upload", body) + r := &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: body, + Size: int64(body.Len()), + }, + UpdateProgress: up, + } + req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r) if err != nil { return err } diff --git a/drivers/ftp/driver.go b/drivers/ftp/driver.go index 05b9e49a..b3e95f93 100644 --- a/drivers/ftp/driver.go +++ b/drivers/ftp/driver.go @@ -2,6 +2,7 @@ package ftp import ( "context" + "github.com/alist-org/alist/v3/internal/stream" stdpath "path" "github.com/alist-org/alist/v3/internal/driver" @@ -114,13 +115,18 @@ func (d *FTP) Remove(ctx context.Context, obj model.Obj) 
error { } } -func (d *FTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *FTP) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { if err := d.login(); err != nil { return err } - // TODO: support cancel - path := stdpath.Join(dstDir.GetPath(), stream.GetName()) - return d.conn.Stor(encode(path, d.Encoding), stream) + path := stdpath.Join(dstDir.GetPath(), s.GetName()) + return d.conn.Stor(encode(path, d.Encoding), &stream.ReaderWithCtx{ + Reader: &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }, + Ctx: ctx, + }) } var _ driver.Driver = (*FTP)(nil) diff --git a/drivers/github/driver.go b/drivers/github/driver.go index eed06882..996c79c7 100644 --- a/drivers/github/driver.go +++ b/drivers/github/driver.go @@ -16,6 +16,7 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" log "github.com/sirupsen/logrus" @@ -649,15 +650,15 @@ func (d *Github) createGitKeep(path, message string) error { return nil } -func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) (string, error) { +func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.UpdateProgress) (string, error) { beforeContent := "{\"encoding\":\"base64\",\"content\":\"" afterContent := "\"}" - length := int64(len(beforeContent)) + calculateBase64Length(stream.GetSize()) + int64(len(afterContent)) + length := int64(len(beforeContent)) + calculateBase64Length(s.GetSize()) + int64(len(afterContent)) beforeContentReader := strings.NewReader(beforeContent) contentReader, contentWriter := io.Pipe() go func() { encoder := base64.NewEncoder(base64.StdEncoding, contentWriter) - if _, err := 
utils.CopyWithBuffer(encoder, stream); err != nil { + if _, err := utils.CopyWithBuffer(encoder, s); err != nil { _ = contentWriter.CloseWithError(err) return } @@ -667,10 +668,12 @@ func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driv afterContentReader := strings.NewReader(afterContent) req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("https://api.github.com/repos/%s/%s/git/blobs", d.Owner, d.Repo), - &ReaderWithProgress{ - Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader), - Length: length, - Progress: up, + &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader), + Size: length, + }, + UpdateProgress: up, }) if err != nil { return "", err diff --git a/drivers/github/util.go b/drivers/github/util.go index 1e7f7fdb..85bc3cb9 100644 --- a/drivers/github/util.go +++ b/drivers/github/util.go @@ -7,26 +7,10 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" - "io" - "math" "strings" "text/template" ) -type ReaderWithProgress struct { - Reader io.Reader - Length int64 - Progress func(percentage float64) - offset int64 -} - -func (r *ReaderWithProgress) Read(p []byte) (int, error) { - n, err := r.Reader.Read(p) - r.offset += int64(n) - r.Progress(math.Min(100.0, float64(r.offset)/float64(r.Length)*100.0)) - return n, err -} - type MessageTemplateVars struct { UserName string ObjName string diff --git a/drivers/ilanzou/driver.go b/drivers/ilanzou/driver.go index 90ef7c1a..8681fed4 100644 --- a/drivers/ilanzou/driver.go +++ b/drivers/ilanzou/driver.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "encoding/hex" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" "net/url" @@ -266,10 +267,10 @@ func (d *ILanZou) Remove(ctx context.Context, obj model.Obj) error { const DefaultPartSize = 1024 * 1024 * 8 
-func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { +func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { h := md5.New() // need to calculate md5 of the full content - tempFile, err := stream.CacheFullInTempFile() + tempFile, err := s.CacheFullInTempFile() if err != nil { return nil, err } @@ -288,8 +289,8 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt res, err := d.proved("/7n/getUpToken", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "fileId": "", - "fileName": stream.GetName(), - "fileSize": stream.GetSize()/1024 + 1, + "fileName": s.GetName(), + "fileSize": s.GetSize()/1024 + 1, "folderId": dstDir.GetID(), "md5": etag, "type": 1, @@ -301,13 +302,20 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt upToken := utils.Json.Get(res, "upToken").ToString() now := time.Now() key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli()) + reader := &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: tempFile, + Size: s.GetSize(), + }, + UpdateProgress: up, + } var token string - if stream.GetSize() <= DefaultPartSize { - res, err := d.upClient.R().SetMultipartFormData(map[string]string{ + if s.GetSize() <= DefaultPartSize { + res, err := d.upClient.R().SetContext(ctx).SetMultipartFormData(map[string]string{ "token": upToken, "key": key, - "fname": stream.GetName(), - }).SetMultipartField("file", stream.GetName(), stream.GetMimetype(), tempFile). + "fname": s.GetName(), + }).SetMultipartField("file", s.GetName(), s.GetMimetype(), reader). 
Post("https://upload.qiniup.com/") if err != nil { return nil, err @@ -321,10 +329,10 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt } uploadId := utils.Json.Get(res.Body(), "uploadId").ToString() parts := make([]Part, 0) - partNum := (stream.GetSize() + DefaultPartSize - 1) / DefaultPartSize + partNum := (s.GetSize() + DefaultPartSize - 1) / DefaultPartSize for i := 1; i <= int(partNum); i++ { u := fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s/%d", d.conf.bucket, keyBase64, uploadId, i) - res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(tempFile, DefaultPartSize)).Put(u) + res, err = d.upClient.R().SetContext(ctx).SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(reader, DefaultPartSize)).Put(u) if err != nil { return nil, err } @@ -335,7 +343,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt }) } res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(base.Json{ - "fnmae": stream.GetName(), + "fnmae": s.GetName(), "parts": parts, }).Post(fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s", d.conf.bucket, keyBase64, uploadId)) if err != nil { @@ -373,9 +381,9 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt ID: strconv.FormatInt(file.FileId, 10), //Path: , Name: file.FileName, - Size: stream.GetSize(), - Modified: stream.ModTime(), - Ctime: stream.CreateTime(), + Size: s.GetSize(), + Modified: s.ModTime(), + Ctime: s.CreateTime(), IsFolder: false, HashInfo: utils.NewHashInfo(utils.MD5, etag), }, nil diff --git a/drivers/ipfs_api/driver.go b/drivers/ipfs_api/driver.go index f6f81305..61886b38 100644 --- a/drivers/ipfs_api/driver.go +++ b/drivers/ipfs_api/driver.go @@ -3,6 +3,7 @@ package ipfs import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "net/url" stdpath "path" "path/filepath" @@ -108,9 
+109,15 @@ func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error { return d.sh.FilesRm(ctx, obj.GetPath(), true) } -func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { // TODO upload file, optional - _, err := d.sh.Add(stream, ToFiles(stdpath.Join(dstDir.GetPath(), stream.GetName()))) + _, err := d.sh.Add(&stream.ReaderWithCtx{ + Reader: &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }, + Ctx: ctx, + }, ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName()))) return err } diff --git a/drivers/kodbox/driver.go b/drivers/kodbox/driver.go index eb5120a6..ff48ffb2 100644 --- a/drivers/kodbox/driver.go +++ b/drivers/kodbox/driver.go @@ -3,6 +3,7 @@ package kodbox import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" "net/http" @@ -225,14 +226,19 @@ func (d *KodBox) Remove(ctx context.Context, obj model.Obj) error { return nil } -func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { +func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { var resp *CommonResp _, err := d.request(http.MethodPost, "/?explorer/upload/fileUpload", func(req *resty.Request) { - req.SetFileReader("file", stream.GetName(), stream). + r := &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + } + req.SetFileReader("file", s.GetName(), r). SetResult(&resp). SetFormData(map[string]string{ "path": dstDir.GetPath(), - }) + }). 
+ SetContext(ctx) }) if err != nil { return nil, err @@ -244,8 +250,8 @@ func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr return &model.ObjThumb{ Object: model.Object{ Path: resp.Info.(string), - Name: stream.GetName(), - Size: stream.GetSize(), + Name: s.GetName(), + Size: s.GetSize(), IsFolder: false, Modified: time.Now(), Ctime: time.Now(), diff --git a/drivers/lanzou/driver.go b/drivers/lanzou/driver.go index 9e73f052..90635d16 100644 --- a/drivers/lanzou/driver.go +++ b/drivers/lanzou/driver.go @@ -2,6 +2,7 @@ package lanzou import ( "context" + "github.com/alist-org/alist/v3/internal/stream" "net/http" "github.com/alist-org/alist/v3/drivers/base" @@ -208,7 +209,7 @@ func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error { return errs.NotSupport } -func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { +func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { if d.IsCookie() || d.IsAccount() { var resp RespText[[]FileOrFolder] _, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) { @@ -217,9 +218,12 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr "vie": "2", "ve": "2", "id": "WU_FILE_0", - "name": stream.GetName(), + "name": s.GetName(), "folder_id_bb_n": dstDir.GetID(), - }).SetFileReader("upload_file", stream.GetName(), stream).SetContext(ctx) + }).SetFileReader("upload_file", s.GetName(), &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }).SetContext(ctx) }, &resp, true) if err != nil { return nil, err diff --git a/drivers/mediatrack/driver.go b/drivers/mediatrack/driver.go index f0f1ded0..ed53f8ee 100644 --- a/drivers/mediatrack/driver.go +++ b/drivers/mediatrack/driver.go @@ -5,6 +5,7 @@ import ( "crypto/md5" "encoding/hex" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" 
"strconv" @@ -161,7 +162,7 @@ func (d *MediaTrack) Remove(ctx context.Context, obj model.Obj) error { return err } -func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { src := "assets/" + uuid.New().String() var resp UploadResp _, err := d.request("https://jayce.api.mediatrack.cn/v3/storage/tokens/asset", http.MethodGet, func(req *resty.Request) { @@ -180,7 +181,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil if err != nil { return err } - tempFile, err := stream.CacheFullInTempFile() + tempFile, err := file.CacheFullInTempFile() if err != nil { return err } @@ -188,13 +189,19 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil _ = tempFile.Close() }() uploader := s3manager.NewUploader(s) - if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { - uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1) + if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { + uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1) } input := &s3manager.UploadInput{ Bucket: &resp.Data.Bucket, Key: &resp.Data.Object, - Body: tempFile, + Body: &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: tempFile, + Size: file.GetSize(), + }, + UpdateProgress: up, + }, } _, err = uploader.UploadWithContext(ctx, input) if err != nil { @@ -213,12 +220,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil hash := hex.EncodeToString(h.Sum(nil)) data := base.Json{ "category": 0, - "description": stream.GetName(), + "description": file.GetName(), "hash": hash, - "mime": stream.GetMimetype(), - "size": stream.GetSize(), + "mime": file.GetMimetype(), + "size": file.GetSize(), "src": src, - "title": 
stream.GetName(), + "title": file.GetName(), "type": 0, } _, err = d.request(url, http.MethodPost, func(req *resty.Request) { diff --git a/drivers/netease_music/driver.go b/drivers/netease_music/driver.go index c0d103de..08460cce 100644 --- a/drivers/netease_music/driver.go +++ b/drivers/netease_music/driver.go @@ -88,7 +88,7 @@ func (d *NeteaseMusic) Remove(ctx context.Context, obj model.Obj) error { } func (d *NeteaseMusic) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - return d.putSongStream(stream) + return d.putSongStream(ctx, stream, up) } func (d *NeteaseMusic) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { diff --git a/drivers/netease_music/types.go b/drivers/netease_music/types.go index 0e156ad1..332f75e9 100644 --- a/drivers/netease_music/types.go +++ b/drivers/netease_music/types.go @@ -2,6 +2,7 @@ package netease_music import ( "context" + "github.com/alist-org/alist/v3/internal/driver" "io" "net/http" "strconv" @@ -71,6 +72,8 @@ func (lrc *LyricObj) getLyricLink() *model.Link { type ReqOption struct { crypto string stream model.FileStreamer + up driver.UpdateProgress + ctx context.Context data map[string]string headers map[string]string cookies []*http.Cookie @@ -113,3 +116,16 @@ func (ch *Characteristic) merge(data map[string]string) map[string]interface{} { } return body } + +type InlineReadCloser struct { + io.Reader + io.Closer +} + +func (rc *InlineReadCloser) Read(p []byte) (int, error) { + return rc.Reader.Read(p) +} + +func (rc *InlineReadCloser) Close() error { + return rc.Closer.Close() +} diff --git a/drivers/netease_music/upload.go b/drivers/netease_music/upload.go index 7f580bd1..3ff6216b 100644 --- a/drivers/netease_music/upload.go +++ b/drivers/netease_music/upload.go @@ -1,8 +1,10 @@ package netease_music import ( + "context" "crypto/md5" "encoding/hex" + "github.com/alist-org/alist/v3/internal/driver" "io" "net/http" "strconv" @@ -47,9 +49,12 @@ func (u *uploader) 
init(stream model.FileStreamer) error { } h := md5.New() - utils.CopyWithBuffer(h, stream) + _, err := utils.CopyWithBuffer(h, stream) + if err != nil { + return err + } u.md5 = hex.EncodeToString(h.Sum(nil)) - _, err := u.file.Seek(0, io.SeekStart) + _, err = u.file.Seek(0, io.SeekStart) if err != nil { return err } @@ -167,7 +172,7 @@ func (u *uploader) publishInfo(resourceId string) error { return nil } -func (u *uploader) upload(stream model.FileStreamer) error { +func (u *uploader) upload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error { bucket := "jd-musicrep-privatecloud-audio-public" token, err := u.allocToken(bucket) if err != nil { @@ -192,6 +197,8 @@ func (u *uploader) upload(stream model.FileStreamer) error { http.MethodPost, ReqOption{ stream: stream, + up: up, + ctx: ctx, headers: map[string]string{ "x-nos-token": token.token, "Content-Type": "audio/mpeg", diff --git a/drivers/netease_music/util.go b/drivers/netease_music/util.go index 4d0696eb..25efde77 100644 --- a/drivers/netease_music/util.go +++ b/drivers/netease_music/util.go @@ -1,7 +1,9 @@ package netease_music import ( - "io" + "context" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/stream" "net/http" "path" "regexp" @@ -58,20 +60,38 @@ func (d *NeteaseMusic) request(url, method string, opt ReqOption) ([]byte, error url = "https://music.163.com/api/linux/forward" } + if opt.ctx != nil { + req.SetContext(opt.ctx) + } if method == http.MethodPost { if opt.stream != nil { + if opt.up == nil { + opt.up = func(_ float64) {} + } req.SetContentLength(true) - req.SetBody(io.ReadCloser(opt.stream)) + req.SetBody(&InlineReadCloser{ + Reader: &stream.ReaderUpdatingProgress{ + Reader: opt.stream, + UpdateProgress: opt.up, + }, + Closer: opt.stream, + }) } else { req.SetFormData(data) } res, err := req.Post(url) - return res.Body(), err + if err != nil { + return nil, err + } + return res.Body(), nil } if method == 
http.MethodGet { res, err := req.Get(url) - return res.Body(), err + if err != nil { + return nil, err + } + return res.Body(), nil } return nil, errs.NotImplement @@ -206,7 +226,7 @@ func (d *NeteaseMusic) removeSongObj(file model.Obj) error { return err } -func (d *NeteaseMusic) putSongStream(stream model.FileStreamer) error { +func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error { tmp, err := stream.CacheFullInTempFile() if err != nil { return err @@ -231,7 +251,7 @@ func (d *NeteaseMusic) putSongStream(stream model.FileStreamer) error { } if u.meta.needUpload { - err = u.upload(stream) + err = u.upload(ctx, stream, up) if err != nil { return err } diff --git a/drivers/pikpak/driver.go b/drivers/pikpak/driver.go index 3db273d6..504b1d0e 100644 --- a/drivers/pikpak/driver.go +++ b/drivers/pikpak/driver.go @@ -255,10 +255,10 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr } if stream.GetSize() <= 10*utils.MB { // 文件大小 小于10MB,改用普通模式上传 - return d.UploadByOSS(¶ms, stream, up) + return d.UploadByOSS(ctx, ¶ms, stream, up) } // 分片上传 - return d.UploadByMultipart(¶ms, stream.GetSize(), stream, up) + return d.UploadByMultipart(ctx, ¶ms, stream.GetSize(), stream, up) } // 离线下载文件 diff --git a/drivers/pikpak/util.go b/drivers/pikpak/util.go index e8f3c854..eb96a42a 100644 --- a/drivers/pikpak/util.go +++ b/drivers/pikpak/util.go @@ -2,6 +2,7 @@ package pikpak import ( "bytes" + "context" "crypto/md5" "crypto/sha1" "encoding/hex" @@ -9,6 +10,7 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/aliyun/aliyun-oss-go-sdk/oss" jsoniter "github.com/json-iterator/go" @@ -19,6 +21,7 @@ import ( "regexp" "strings" "sync" + "sync/atomic" "time" 
"github.com/alist-org/alist/v3/drivers/base" @@ -417,7 +420,7 @@ func (d *PikPak) refreshCaptchaToken(action string, metas map[string]string) err return nil } -func (d *PikPak) UploadByOSS(params *S3Params, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.FileStreamer, up driver.UpdateProgress) error { ossClient, err := oss.New(params.Endpoint, params.AccessKeyID, params.AccessKeySecret) if err != nil { return err @@ -427,14 +430,20 @@ func (d *PikPak) UploadByOSS(params *S3Params, stream model.FileStreamer, up dri return err } - err = bucket.PutObject(params.Key, stream, OssOption(params)...) + err = bucket.PutObject(params.Key, &stream.ReaderWithCtx{ + Reader: &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }, + Ctx: ctx, + }, OssOption(params)...) if err != nil { return err } return nil } -func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSize int64, s model.FileStreamer, up driver.UpdateProgress) error { var ( chunks []oss.FileChunk parts []oss.UploadPart @@ -444,7 +453,7 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode err error ) - tmpF, err := stream.CacheFullInTempFile() + tmpF, err := s.CacheFullInTempFile() if err != nil { return err } @@ -488,6 +497,7 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode quit <- struct{}{} }() + completedNum := atomic.Int32{} // consumers for i := 0; i < ThreadsNum; i++ { go func(threadId int) { @@ -500,6 +510,8 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode var part oss.UploadPart // 出现错误就继续尝试,共尝试3次 for retry := 0; retry < 3; retry++ { select { + case <-ctx.Done(): + break case <-ticker.C: errCh <- errors.Wrap(err, "ossToken 过期") default: @@ -511,12 
+523,18 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode } b := bytes.NewBuffer(buf) - if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, OssOption(params)...); err == nil { + if part, err = bucket.UploadPart(imur, &stream.ReaderWithCtx{ + Reader: b, + Ctx: ctx, + }, chunk.Size, chunk.Number, OssOption(params)...); err == nil { break } } if err != nil { - errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err)) + errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", s.GetName(), chunk.Number, err)) + } else { + num := completedNum.Add(1) + up(float64(num) * 100.0 / float64(len(chunks))) } UploadedPartsCh <- part } @@ -547,7 +565,7 @@ LOOP: // EOF错误是xml的Unmarshal导致的,响应其实是json格式,所以实际上上传是成功的 if _, err = bucket.CompleteMultipartUpload(imur, parts, OssOption(params)...); err != nil && !errors.Is(err, io.EOF) { // 当文件名含有 &< 这两个字符之一时响应的xml解析会出现错误,实际上上传是成功的 - if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") { + if filename := filepath.Base(s.GetName()); !strings.ContainsAny(filename, "&<") { return err } } diff --git a/drivers/quqi/driver.go b/drivers/quqi/driver.go index 51e54981..2ab972ca 100644 --- a/drivers/quqi/driver.go +++ b/drivers/quqi/driver.go @@ -3,6 +3,7 @@ package quqi import ( "bytes" "context" + "errors" "io" "strconv" "strings" @@ -11,6 +12,7 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + istream "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils/random" "github.com/aws/aws-sdk-go/aws" @@ -385,9 +387,16 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea } uploader := s3manager.NewUploader(s) buf := make([]byte, 1024*1024*2) + fup := &istream.ReaderUpdatingProgress{ + Reader: 
&istream.SimpleReaderWithSize{ + Reader: f, + Size: int64(len(buf)), + }, + UpdateProgress: up, + } for partNumber := int64(1); ; partNumber++ { - n, err := io.ReadFull(f, buf) - if err != nil && err != io.ErrUnexpectedEOF { + n, err := io.ReadFull(fup, buf) + if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) { if err == io.EOF { break } diff --git a/drivers/s3/driver.go b/drivers/s3/driver.go index 82c050a1..a7e924e2 100644 --- a/drivers/s3/driver.go +++ b/drivers/s3/driver.go @@ -163,18 +163,21 @@ func (d *S3) Remove(ctx context.Context, obj model.Obj) error { return d.removeFile(obj.GetPath()) } -func (d *S3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { uploader := s3manager.NewUploader(d.Session) - if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { - uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1) + if s.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { + uploader.PartSize = s.GetSize() / (s3manager.MaxUploadParts - 1) } - key := getKey(stdpath.Join(dstDir.GetPath(), stream.GetName()), false) - contentType := stream.GetMimetype() + key := getKey(stdpath.Join(dstDir.GetPath(), s.GetName()), false) + contentType := s.GetMimetype() log.Debugln("key:", key) input := &s3manager.UploadInput{ - Bucket: &d.Bucket, - Key: &key, - Body: stream, + Bucket: &d.Bucket, + Key: &key, + Body: &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }, ContentType: &contentType, } _, err := uploader.UploadWithContext(ctx, input) diff --git a/drivers/seafile/driver.go b/drivers/seafile/driver.go index 6d1f16da..f23038d1 100644 --- a/drivers/seafile/driver.go +++ b/drivers/seafile/driver.go @@ -3,6 +3,7 @@ package seafile import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "net/http" "strings" "time" 
@@ -197,7 +198,7 @@ func (d *Seafile) Remove(ctx context.Context, obj model.Obj) error { return err } -func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { repo, path, err := d.getRepoAndPath(dstDir.GetPath()) if err != nil { return err @@ -214,11 +215,16 @@ func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt u := string(res) u = u[1 : len(u)-1] // remove quotes _, err = d.request(http.MethodPost, u, func(req *resty.Request) { - req.SetFileReader("file", stream.GetName(), stream). + r := &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + } + req.SetFileReader("file", s.GetName(), r). SetFormData(map[string]string{ "parent_dir": path, "replace": "1", - }) + }). + SetContext(ctx) }) return err } diff --git a/drivers/template/driver.go b/drivers/template/driver.go index 439f57f3..ff3648db 100644 --- a/drivers/template/driver.go +++ b/drivers/template/driver.go @@ -66,11 +66,33 @@ func (d *Template) Remove(ctx context.Context, obj model.Obj) error { return errs.NotImplement } -func (d *Template) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { +func (d *Template) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { // TODO upload file, optional return nil, errs.NotImplement } +func (d *Template) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Template) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { + // TODO list args.InnerPath in the archive obj, return 
errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Template) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Template) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) { + // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional + // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir + // return errs.NotImplement to use an internal archive tool + return nil, errs.NotImplement +} + //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { // return nil, errs.NotSupport //} diff --git a/drivers/thunder/driver.go b/drivers/thunder/driver.go index 8403f261..1b7f0af6 100644 --- a/drivers/thunder/driver.go +++ b/drivers/thunder/driver.go @@ -3,6 +3,7 @@ package thunder import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "net/http" "strconv" "strings" @@ -332,16 +333,16 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error { return err } -func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - hi := stream.GetHash() +func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + hi := file.GetHash() gcid := hi.GetHash(hash_extend.GCID) if len(gcid) < hash_extend.GCID.Width { - tFile, err := stream.CacheFullInTempFile() + tFile, err := file.CacheFullInTempFile() if err != nil { return err } - gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize()) + gcid, err = utils.HashFile(hash_extend.GCID, 
tFile, file.GetSize()) if err != nil { return err } @@ -353,8 +354,8 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model. r.SetBody(&base.Json{ "kind": FILE, "parent_id": dstDir.GetID(), - "name": stream.GetName(), - "size": stream.GetSize(), + "name": file.GetName(), + "size": file.GetSize(), "hash": gcid, "upload_type": UPLOAD_TYPE_RESUMABLE, }) @@ -375,14 +376,17 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model. return err } uploader := s3manager.NewUploader(s) - if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { - uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1) + if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { + uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1) } _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{ Bucket: aws.String(param.Bucket), Key: aws.String(param.Key), Expires: aws.Time(param.Expiration), - Body: stream, + Body: &stream.ReaderUpdatingProgress{ + Reader: file, + UpdateProgress: up, + }, }) return err } diff --git a/drivers/thunderx/driver.go b/drivers/thunderx/driver.go index b9ee668c..93e07ca9 100644 --- a/drivers/thunderx/driver.go +++ b/drivers/thunderx/driver.go @@ -8,6 +8,7 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash" "github.com/aws/aws-sdk-go/aws" @@ -363,16 +364,16 @@ func (xc *XunLeiXCommon) Remove(ctx context.Context, obj model.Obj) error { return err } -func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - hi := stream.GetHash() +func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up 
driver.UpdateProgress) error { + hi := file.GetHash() gcid := hi.GetHash(hash_extend.GCID) if len(gcid) < hash_extend.GCID.Width { - tFile, err := stream.CacheFullInTempFile() + tFile, err := file.CacheFullInTempFile() if err != nil { return err } - gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize()) + gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize()) if err != nil { return err } @@ -384,8 +385,8 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model r.SetBody(&base.Json{ "kind": FILE, "parent_id": dstDir.GetID(), - "name": stream.GetName(), - "size": stream.GetSize(), + "name": file.GetName(), + "size": file.GetSize(), "hash": gcid, "upload_type": UPLOAD_TYPE_RESUMABLE, }) @@ -406,14 +407,17 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model return err } uploader := s3manager.NewUploader(s) - if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { - uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1) + if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { + uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1) } _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{ Bucket: aws.String(param.Bucket), Key: aws.String(param.Key), Expires: aws.Time(param.Expiration), - Body: stream, + Body: &stream.ReaderUpdatingProgress{ + Reader: file, + UpdateProgress: up, + }, }) return err } diff --git a/drivers/trainbit/driver.go b/drivers/trainbit/driver.go index 795b2fb8..2b1815ed 100644 --- a/drivers/trainbit/driver.go +++ b/drivers/trainbit/driver.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" "net/url" @@ -114,23 +115,18 @@ func (d *Trainbit) Remove(ctx context.Context, obj model.Obj) error { return err } -func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up 
driver.UpdateProgress) error { +func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { endpoint, _ := url.Parse("https://tb28.trainbit.com/api/upload/send_raw/") query := &url.Values{} query.Add("q", strings.Split(dstDir.GetID(), "_")[1]) query.Add("guid", guid) - query.Add("name", url.QueryEscape(local2provider(stream.GetName(), false)+".")) + query.Add("name", url.QueryEscape(local2provider(s.GetName(), false)+".")) endpoint.RawQuery = query.Encode() - var total int64 - total = 0 - progressReader := &ProgressReader{ - stream, - func(byteNum int) { - total += int64(byteNum) - up(float64(total) / float64(stream.GetSize()) * 100) - }, + progressReader := &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, } - req, err := http.NewRequest(http.MethodPost, endpoint.String(), progressReader) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint.String(), progressReader) if err != nil { return err } diff --git a/drivers/trainbit/util.go b/drivers/trainbit/util.go index afc111a8..486e8851 100644 --- a/drivers/trainbit/util.go +++ b/drivers/trainbit/util.go @@ -13,17 +13,6 @@ import ( "github.com/alist-org/alist/v3/internal/model" ) -type ProgressReader struct { - io.Reader - reporter func(byteNum int) -} - -func (progressReader *ProgressReader) Read(data []byte) (int, error) { - byteNum, err := progressReader.Reader.Read(data) - progressReader.reporter(byteNum) - return byteNum, err -} - func get(url string, apiKey string, AUSHELLPORTAL string) (*http.Response, error) { req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { diff --git a/drivers/uss/driver.go b/drivers/uss/driver.go index 447515d8..3c54797c 100644 --- a/drivers/uss/driver.go +++ b/drivers/uss/driver.go @@ -3,6 +3,7 @@ package uss import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "net/url" "path" "strings" @@ -122,11 +123,16 @@ func (d *USS) Remove(ctx context.Context, obj 
model.Obj) error { }) } -func (d *USS) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - // TODO not support cancel?? +func (d *USS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { return d.client.Put(&upyun.PutObjectConfig{ - Path: getKey(path.Join(dstDir.GetPath(), stream.GetName()), false), - Reader: stream, + Path: getKey(path.Join(dstDir.GetPath(), s.GetName()), false), + Reader: &stream.ReaderWithCtx{ + Reader: &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }, + Ctx: ctx, + }, }) } diff --git a/drivers/webdav/driver.go b/drivers/webdav/driver.go index b402b1db..35240c49 100644 --- a/drivers/webdav/driver.go +++ b/drivers/webdav/driver.go @@ -2,6 +2,7 @@ package webdav import ( "context" + "github.com/alist-org/alist/v3/internal/stream" "net/http" "os" "path" @@ -93,13 +94,18 @@ func (d *WebDav) Remove(ctx context.Context, obj model.Obj) error { return d.client.RemoveAll(getPath(obj)) } -func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { callback := func(r *http.Request) { - r.Header.Set("Content-Type", stream.GetMimetype()) - r.ContentLength = stream.GetSize() + r.Header.Set("Content-Type", s.GetMimetype()) + r.ContentLength = s.GetSize() } - // TODO: support cancel - err := d.client.WriteStream(path.Join(dstDir.GetPath(), stream.GetName()), stream, 0644, callback) + err := d.client.WriteStream(path.Join(dstDir.GetPath(), s.GetName()), &stream.ReaderWithCtx{ + Reader: &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }, + Ctx: ctx, + }, 0644, callback) return err } diff --git a/drivers/weiyun/driver.go b/drivers/weiyun/driver.go index e6d5897c..59bd7237 100644 --- a/drivers/weiyun/driver.go +++ b/drivers/weiyun/driver.go @@ -7,6 
+7,7 @@ import ( "math" "net/http" "strconv" + "sync/atomic" "time" "github.com/alist-org/alist/v3/drivers/base" @@ -311,77 +312,82 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr // NOTE: // 秒传需要sha1最后一个状态,但sha1无法逆运算需要读完整个文件(或许可以??) // 服务器支持上传进度恢复,不需要额外实现 - if folder, ok := dstDir.(*Folder); ok { - file, err := stream.CacheFullInTempFile() - if err != nil { - return nil, err - } + var folder *Folder + var ok bool + if folder, ok = dstDir.(*Folder); !ok { + return nil, errs.NotSupport + } + file, err := stream.CacheFullInTempFile() + if err != nil { + return nil, err + } - // step 1. - preData, err := d.client.PreUpload(ctx, weiyunsdkgo.UpdloadFileParam{ - PdirKey: folder.GetPKey(), - DirKey: folder.DirKey, + // step 1. + preData, err := d.client.PreUpload(ctx, weiyunsdkgo.UpdloadFileParam{ + PdirKey: folder.GetPKey(), + DirKey: folder.DirKey, - FileName: stream.GetName(), - FileSize: stream.GetSize(), - File: file, + FileName: stream.GetName(), + FileSize: stream.GetSize(), + File: file, - ChannelCount: 4, - FileExistOption: 1, - }) - if err != nil { - return nil, err - } + ChannelCount: 4, + FileExistOption: 1, + }) + if err != nil { + return nil, err + } - // not fast upload - if !preData.FileExist { - // step.2 增加上传通道 - if len(preData.ChannelList) < d.uploadThread { - newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData) - if err != nil { - return nil, err - } - preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...) 
- } - // step.3 上传 - threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList), - retry.Attempts(3), - retry.Delay(time.Second), - retry.DelayType(retry.BackOffDelay)) - - for _, channel := range preData.ChannelList { - if utils.IsCanceled(upCtx) { - break - } - - var channel = channel - threadG.Go(func(ctx context.Context) error { - for { - channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len))) - upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData, - io.NewSectionReader(file, channel.Offset, int64(channel.Len))) - if err != nil { - return err - } - // 上传完成 - if upData.UploadState != 1 { - return nil - } - channel = upData.Channel - } - }) - } - if err = threadG.Wait(); err != nil { + // not fast upload + if !preData.FileExist { + // step.2 增加上传通道 + if len(preData.ChannelList) < d.uploadThread { + newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData) + if err != nil { return nil, err } + preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...) 
} + // step.3 上传 + threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList), + retry.Attempts(3), + retry.Delay(time.Second), + retry.DelayType(retry.BackOffDelay)) - return &File{ - PFolder: folder, - File: preData.File, - }, nil + total := atomic.Int64{} + for _, channel := range preData.ChannelList { + if utils.IsCanceled(upCtx) { + break + } + + var channel = channel + threadG.Go(func(ctx context.Context) error { + for { + channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len))) + upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData, + io.NewSectionReader(file, channel.Offset, int64(channel.Len))) + if err != nil { + return err + } + cur := total.Add(int64(channel.Len)) + up(float64(cur) * 100.0 / float64(stream.GetSize())) + // 上传完成 + if upData.UploadState != 1 { + return nil + } + channel = upData.Channel + } + }) + } + if err = threadG.Wait(); err != nil { + return nil, err + } } - return nil, errs.NotSupport + + return &File{ + PFolder: folder, + File: preData.File, + }, nil } // func (d *WeiYun) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { diff --git a/drivers/wopan/driver.go b/drivers/wopan/driver.go index bccce4b1..86093fc1 100644 --- a/drivers/wopan/driver.go +++ b/drivers/wopan/driver.go @@ -161,6 +161,7 @@ func (d *Wopan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre OnProgress: func(current, total int64) { up(100 * float64(current) / float64(total)) }, + Ctx: ctx, }) return err } diff --git a/drivers/yandex_disk/driver.go b/drivers/yandex_disk/driver.go index 5af9f2e4..fe858519 100644 --- a/drivers/yandex_disk/driver.go +++ b/drivers/yandex_disk/driver.go @@ -2,6 +2,7 @@ package yandex_disk import ( "context" + "github.com/alist-org/alist/v3/internal/stream" "net/http" "path" "strconv" @@ -106,25 +107,30 @@ func (d *YandexDisk) Remove(ctx context.Context, obj model.Obj) error { return err } -func (d *YandexDisk) Put(ctx 
context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { var resp UploadResp _, err := d.request("/upload", http.MethodGet, func(req *resty.Request) { req.SetQueryParams(map[string]string{ - "path": path.Join(dstDir.GetPath(), stream.GetName()), + "path": path.Join(dstDir.GetPath(), s.GetName()), "overwrite": "true", }) }, &resp) if err != nil { return err } - req, err := http.NewRequest(resp.Method, resp.Href, stream) + req, err := http.NewRequestWithContext(ctx, resp.Method, resp.Href, &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) if err != nil { return err } - req = req.WithContext(ctx) - req.Header.Set("Content-Length", strconv.FormatInt(stream.GetSize(), 10)) + req.Header.Set("Content-Length", strconv.FormatInt(s.GetSize(), 10)) req.Header.Set("Content-Type", "application/octet-stream") res, err := base.HttpClient.Do(req) + if err != nil { + return err + } _ = res.Body.Close() return err } diff --git a/internal/driver/driver.go b/internal/driver/driver.go index 09fd42e7..292f8e6a 100644 --- a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -77,7 +77,7 @@ type Remove interface { } type Put interface { - Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) error + Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) error } type PutURL interface { @@ -113,7 +113,7 @@ type CopyResult interface { } type PutResult interface { - Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) (model.Obj, error) + Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) (model.Obj, error) } type PutURLResult interface { @@ -159,7 +159,7 @@ type ArchiveDecompressResult interface { ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args 
model.ArchiveDecompressArgs) ([]model.Obj, error) } -type UpdateProgress model.UpdateProgress +type UpdateProgress = model.UpdateProgress type Progress struct { Total int64 diff --git a/internal/stream/stream.go b/internal/stream/stream.go index 1962fb46..74646bfb 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -562,3 +562,17 @@ func (f *FileReadAtSeeker) Seek(offset int64, whence int) (int64, error) { func (f *FileReadAtSeeker) Close() error { return f.ss.Close() } + +type ReaderWithCtx struct { + io.Reader + Ctx context.Context +} + +func (r *ReaderWithCtx) Read(p []byte) (n int, err error) { + select { + case <-r.Ctx.Done(): + return 0, r.Ctx.Err() + default: + return r.Reader.Read(p) + } +} From 39bde328ee10e226a6ee1689c3ae979ee10566cb Mon Sep 17 00:00:00 2001 From: Sakana Date: Sat, 1 Feb 2025 17:32:58 +0800 Subject: [PATCH 107/187] fix(lenovonas_share): the size of the directory (#7914) --- drivers/lenovonas_share/types.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/lenovonas_share/types.go b/drivers/lenovonas_share/types.go index 77b966d3..37ff1465 100644 --- a/drivers/lenovonas_share/types.go +++ b/drivers/lenovonas_share/types.go @@ -47,7 +47,11 @@ func (f File) GetPath() string { } func (f File) GetSize() int64 { - return f.Size + if f.IsDir() { + return 0 + } else { + return f.Size + } } func (f File) GetName() string { @@ -70,10 +74,6 @@ func (f File) GetID() string { return f.GetPath() } -func (f File) Thumb() string { - return "" -} - type Files struct { Data struct { List []File `json:"list"` From 6164e4577b68caa53da3e85e29a2a24244f4022f Mon Sep 17 00:00:00 2001 From: hshpy Date: Wed, 5 Feb 2025 19:22:10 +0800 Subject: [PATCH 108/187] fix: missing args when using alias driver (#7941 close #7932) --- server/handles/fsread.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/server/handles/fsread.go b/server/handles/fsread.go index 7c580f63..0a62f1ff 100644 --- 
a/server/handles/fsread.go +++ b/server/handles/fsread.go @@ -303,9 +303,10 @@ func FsGet(c *gin.Context) { } else { // if storage is not proxy, use raw url by fs.Link link, _, err := fs.Link(c, reqPath, model.LinkArgs{ - IP: c.ClientIP(), - Header: c.Request.Header, - HttpReq: c.Request, + IP: c.ClientIP(), + Header: c.Request.Header, + HttpReq: c.Request, + Redirect: true, }) if err != nil { common.ErrorResp(c, err, 500) From f7958077532be308a4c38d7adb27e4a2e4cc10a1 Mon Sep 17 00:00:00 2001 From: Sakana Date: Sun, 9 Feb 2025 18:30:38 +0800 Subject: [PATCH 109/187] feat(github_releases): support dir size for show all version (#7938) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor * 修改默认 RepoStructure * feat: 支持使用 gh-proxy --- drivers/github_releases/driver.go | 158 ++++++++++++---------- drivers/github_releases/meta.go | 3 +- drivers/github_releases/models.go | 86 ++++++++++++ drivers/github_releases/types.go | 201 ++++++++++++++++++++++++---- drivers/github_releases/util.go | 210 ++++++------------------------ 5 files changed, 386 insertions(+), 272 deletions(-) create mode 100644 drivers/github_releases/models.go diff --git a/drivers/github_releases/driver.go b/drivers/github_releases/driver.go index 79f2b582..b35aa57a 100644 --- a/drivers/github_releases/driver.go +++ b/drivers/github_releases/driver.go @@ -4,8 +4,6 @@ import ( "context" "fmt" "net/http" - "time" - "strings" "github.com/alist-org/alist/v3/internal/driver" @@ -18,7 +16,7 @@ type GithubReleases struct { model.Storage Addition - releases []Release + points []MountPoint } func (d *GithubReleases) Config() driver.Config { @@ -30,17 +28,11 @@ func (d *GithubReleases) GetAddition() driver.Additional { } func (d *GithubReleases) Init(ctx context.Context) error { - SetHeader(d.Addition.Token) - repos, err := ParseRepos(d.Addition.RepoStructure, d.Addition.ShowAllVersion) - if err != nil { - return err - } - d.releases = repos + 
d.ParseRepos(d.Addition.RepoStructure) return nil } func (d *GithubReleases) Drop(ctx context.Context) error { - ClearCache() return nil } @@ -48,67 +40,83 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis files := make([]File, 0) path := fmt.Sprintf("/%s", strings.Trim(dir.GetPath(), "/")) - for _, repo := range d.releases { - if repo.Path == path { // 与仓库路径相同 - resp, err := GetRepoReleaseInfo(repo.RepoName, repo.ID, path, d.Storage.CacheExpiration) - if err != nil { - return nil, err - } - files = append(files, resp.Files...) + for i := range d.points { + point := &d.points[i] - if d.Addition.ShowReadme { - resp, err := GetGithubOtherFile(repo.RepoName, path, d.Storage.CacheExpiration) - if err != nil { - return nil, err + if !d.Addition.ShowAllVersion { // latest + point.RequestRelease(d.GetRequest, args.Refresh) + + if point.Point == path { // 与仓库路径相同 + files = append(files, point.GetLatestRelease()...) + if d.Addition.ShowReadme { + files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...) + } + } else if strings.HasPrefix(point.Point, path) { // 仓库目录的父目录 + nextDir := GetNextDir(point.Point, path) + if nextDir == "" { + continue } - files = append(files, *resp...) 
- } - } else if strings.HasPrefix(repo.Path, path) { // 仓库路径是目录的子目录 - nextDir := GetNextDir(repo.Path, path) - if nextDir == "" { - continue - } - if d.Addition.ShowAllVersion { - files = append(files, File{ - FileName: nextDir, - Size: 0, - CreateAt: time.Time{}, - UpdateAt: time.Time{}, - Url: "", - Type: "dir", - Path: fmt.Sprintf("%s/%s", path, nextDir), - }) - continue - } - - repo, _ := GetRepoReleaseInfo(repo.RepoName, repo.Version, path, d.Storage.CacheExpiration) - - hasSameDir := false - for index, file := range files { - if file.FileName == nextDir { - hasSameDir = true - files[index].Size += repo.Size - files[index].UpdateAt = func(a time.Time, b time.Time) time.Time { - if a.After(b) { - return a - } - return b - }(files[index].UpdateAt, repo.UpdateAt) - break + hasSameDir := false + for index := range files { + if files[index].GetName() == nextDir { + hasSameDir = true + files[index].Size += point.GetLatestSize() + break + } + } + if !hasSameDir { + files = append(files, File{ + Path: path + "/" + nextDir, + FileName: nextDir, + Size: point.GetLatestSize(), + UpdateAt: point.Release.PublishedAt, + CreateAt: point.Release.CreatedAt, + Type: "dir", + Url: "", + }) } } + } else { // all version + point.RequestReleases(d.GetRequest, args.Refresh) - if !hasSameDir { - files = append(files, File{ - FileName: nextDir, - Size: repo.Size, - CreateAt: repo.CreateAt, - UpdateAt: repo.UpdateAt, - Url: repo.Url, - Type: "dir", - Path: fmt.Sprintf("%s/%s", path, nextDir), - }) + if point.Point == path { // 与仓库路径相同 + files = append(files, point.GetAllVersion()...) + if d.Addition.ShowReadme { + files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...) 
+ } + } else if strings.HasPrefix(point.Point, path) { // 仓库目录的父目录 + nextDir := GetNextDir(point.Point, path) + if nextDir == "" { + continue + } + + hasSameDir := false + for index := range files { + if files[index].GetName() == nextDir { + hasSameDir = true + files[index].Size += point.GetAllVersionSize() + break + } + } + if !hasSameDir { + files = append(files, File{ + FileName: nextDir, + Path: path + "/" + nextDir, + Size: point.GetAllVersionSize(), + UpdateAt: (*point.Releases)[0].PublishedAt, + CreateAt: (*point.Releases)[0].CreatedAt, + Type: "dir", + Url: "", + }) + } + } else if strings.HasPrefix(path, point.Point) { // 仓库目录的子目录 + tagName := GetNextDir(path, point.Point) + if tagName == "" { + continue + } + + files = append(files, point.GetReleaseByTagName(tagName)...) } } } @@ -119,35 +127,41 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis } func (d *GithubReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + url := file.GetID() + gh_proxy := strings.TrimSpace(d.Addition.GitHubProxy) + + if gh_proxy != "" { + url = strings.Replace(url, "https://github.com", gh_proxy, 1) + } + link := model.Link{ - URL: file.GetID(), + URL: url, Header: http.Header{}, } return &link, nil } func (d *GithubReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + // TODO create folder, optional return nil, errs.NotImplement } func (d *GithubReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + // TODO move obj, optional return nil, errs.NotImplement } func (d *GithubReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + // TODO rename obj, optional return nil, errs.NotImplement } func (d *GithubReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + // TODO copy obj, optional return nil, errs.NotImplement } func (d *GithubReleases) Remove(ctx 
context.Context, obj model.Obj) error { + // TODO remove obj, optional return errs.NotImplement } - -func (d *GithubReleases) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { - return nil, errs.NotImplement -} - -var _ driver.Driver = (*GithubReleases)(nil) diff --git a/drivers/github_releases/meta.go b/drivers/github_releases/meta.go index ca6ca5dc..47b84d37 100644 --- a/drivers/github_releases/meta.go +++ b/drivers/github_releases/meta.go @@ -7,10 +7,11 @@ import ( type Addition struct { driver.RootID - RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"/path/to/alist-gh:alistGo/alist\n/path/to2/alist-web-gh:AlistGo/alist-web" help:"structure:[path:]org/repo"` + RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"alistGo/alist" help:"structure:[path:]org/repo"` ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README、LICENSE file"` Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"` ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"` + GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. 
https://ghproxy.net/github.com or https://gh-proxy.com/github.com "` } var config = driver.Config{ diff --git a/drivers/github_releases/models.go b/drivers/github_releases/models.go new file mode 100644 index 00000000..a9a0e493 --- /dev/null +++ b/drivers/github_releases/models.go @@ -0,0 +1,86 @@ +package github_releases + +type Release struct { + Url string `json:"url"` + AssetsUrl string `json:"assets_url"` + UploadUrl string `json:"upload_url"` + HtmlUrl string `json:"html_url"` + Id int `json:"id"` + Author User `json:"author"` + NodeId string `json:"node_id"` + TagName string `json:"tag_name"` + TargetCommitish string `json:"target_commitish"` + Name string `json:"name"` + Draft bool `json:"draft"` + Prerelease bool `json:"prerelease"` + CreatedAt string `json:"created_at"` + PublishedAt string `json:"published_at"` + Assets []Asset `json:"assets"` + TarballUrl string `json:"tarball_url"` + ZipballUrl string `json:"zipball_url"` + Body string `json:"body"` + Reactions Reactions `json:"reactions"` +} + +type User struct { + Login string `json:"login"` + Id int `json:"id"` + NodeId string `json:"node_id"` + AvatarUrl string `json:"avatar_url"` + GravatarId string `json:"gravatar_id"` + Url string `json:"url"` + HtmlUrl string `json:"html_url"` + FollowersUrl string `json:"followers_url"` + FollowingUrl string `json:"following_url"` + GistsUrl string `json:"gists_url"` + StarredUrl string `json:"starred_url"` + SubscriptionsUrl string `json:"subscriptions_url"` + OrganizationsUrl string `json:"organizations_url"` + ReposUrl string `json:"repos_url"` + EventsUrl string `json:"events_url"` + ReceivedEventsUrl string `json:"received_events_url"` + Type string `json:"type"` + UserViewType string `json:"user_view_type"` + SiteAdmin bool `json:"site_admin"` +} + +type Asset struct { + Url string `json:"url"` + Id int `json:"id"` + NodeId string `json:"node_id"` + Name string `json:"name"` + Label string `json:"label"` + Uploader User `json:"uploader"` + ContentType 
string `json:"content_type"` + State string `json:"state"` + Size int64 `json:"size"` + DownloadCount int `json:"download_count"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + BrowserDownloadUrl string `json:"browser_download_url"` +} + +type Reactions struct { + Url string `json:"url"` + TotalCount int `json:"total_count"` + PlusOne int `json:"+1"` + MinusOne int `json:"-1"` + Laugh int `json:"laugh"` + Hooray int `json:"hooray"` + Confused int `json:"confused"` + Heart int `json:"heart"` + Rocket int `json:"rocket"` + Eyes int `json:"eyes"` +} + +type FileInfo struct { + Name string `json:"name"` + Path string `json:"path"` + Sha string `json:"sha"` + Size int64 `json:"size"` + Url string `json:"url"` + HtmlUrl string `json:"html_url"` + GitUrl string `json:"git_url"` + DownloadUrl string `json:"download_url"` + Type string `json:"type"` +} diff --git a/drivers/github_releases/types.go b/drivers/github_releases/types.go index 733460dc..b0a9ee61 100644 --- a/drivers/github_releases/types.go +++ b/drivers/github_releases/types.go @@ -1,19 +1,181 @@ package github_releases import ( + "encoding/json" + "strings" "time" "github.com/alist-org/alist/v3/pkg/utils" + "github.com/go-resty/resty/v2" ) +type MountPoint struct { + Point string // 挂载点 + Repo string // 仓库名 owner/repo + Release *Release // Release 指针 latest + Releases *[]Release // []Release 指针 + OtherFile *[]FileInfo // 仓库根目录下的其他文件 +} + +// 请求最新版本 +func (m *MountPoint) RequestRelease(get func(url string) (*resty.Response, error), refresh bool) { + if m.Repo == "" { + return + } + + if m.Release == nil || refresh { + resp, _ := get("https://api.github.com/repos/" + m.Repo + "/releases/latest") + m.Release = new(Release) + json.Unmarshal(resp.Body(), m.Release) + } +} + +// 请求所有版本 +func (m *MountPoint) RequestReleases(get func(url string) (*resty.Response, error), refresh bool) { + if m.Repo == "" { + return + } + + if m.Releases == nil || refresh { + resp, _ := 
get("https://api.github.com/repos/" + m.Repo + "/releases") + m.Releases = new([]Release) + json.Unmarshal(resp.Body(), m.Releases) + } +} + +// 获取最新版本 +func (m *MountPoint) GetLatestRelease() []File { + files := make([]File, 0) + for _, asset := range m.Release.Assets { + files = append(files, File{ + Path: m.Point + "/" + asset.Name, + FileName: asset.Name, + Size: asset.Size, + Type: "file", + UpdateAt: asset.UpdatedAt, + CreateAt: asset.CreatedAt, + Url: asset.BrowserDownloadUrl, + }) + } + return files +} + +// 获取最新版本大小 +func (m *MountPoint) GetLatestSize() int64 { + size := int64(0) + for _, asset := range m.Release.Assets { + size += asset.Size + } + return size +} + +// 获取所有版本 +func (m *MountPoint) GetAllVersion() []File { + files := make([]File, 0) + for _, release := range *m.Releases { + file := File{ + Path: m.Point + "/" + release.TagName, + FileName: release.TagName, + Size: m.GetSizeByTagName(release.TagName), + Type: "dir", + UpdateAt: release.PublishedAt, + CreateAt: release.CreatedAt, + Url: release.HtmlUrl, + } + for _, asset := range release.Assets { + file.Size += asset.Size + } + files = append(files, file) + } + return files +} + +// 根据版本号获取版本 +func (m *MountPoint) GetReleaseByTagName(tagName string) []File { + for _, item := range *m.Releases { + if item.TagName == tagName { + files := make([]File, 0) + for _, asset := range item.Assets { + files = append(files, File{ + Path: m.Point + "/" + tagName + "/" + asset.Name, + FileName: asset.Name, + Size: asset.Size, + Type: "file", + UpdateAt: asset.UpdatedAt, + CreateAt: asset.CreatedAt, + Url: asset.BrowserDownloadUrl, + }) + } + return files + } + } + return nil +} + +// 根据版本号获取版本大小 +func (m *MountPoint) GetSizeByTagName(tagName string) int64 { + if m.Releases == nil { + return 0 + } + for _, item := range *m.Releases { + if item.TagName == tagName { + size := int64(0) + for _, asset := range item.Assets { + size += asset.Size + } + return size + } + } + return 0 +} + +// 获取所有版本大小 +func (m 
*MountPoint) GetAllVersionSize() int64 { + if m.Releases == nil { + return 0 + } + size := int64(0) + for _, release := range *m.Releases { + for _, asset := range release.Assets { + size += asset.Size + } + } + return size +} + +func (m *MountPoint) GetOtherFile(get func(url string) (*resty.Response, error), refresh bool) []File { + if m.OtherFile == nil || refresh { + resp, _ := get("https://api.github.com/repos/" + m.Repo + "/contents") + m.OtherFile = new([]FileInfo) + json.Unmarshal(resp.Body(), m.OtherFile) + } + + files := make([]File, 0) + defaultTime := "1970-01-01T00:00:00Z" + for _, file := range *m.OtherFile { + if strings.HasSuffix(file.Name, ".md") || strings.HasPrefix(file.Name, "LICENSE") { + files = append(files, File{ + Path: m.Point + "/" + file.Name, + FileName: file.Name, + Size: file.Size, + Type: "file", + UpdateAt: defaultTime, + CreateAt: defaultTime, + Url: file.DownloadUrl, + }) + } + } + return files +} + type File struct { - FileName string `json:"name"` - Size int64 `json:"size"` - CreateAt time.Time `json:"time"` - UpdateAt time.Time `json:"chtime"` - Url string `json:"url"` - Type string `json:"type"` - Path string `json:"path"` + Path string // 文件路径 + FileName string // 文件名 + Size int64 // 文件大小 + Type string // 文件类型 + UpdateAt string // 更新时间 eg:"2025-01-27T16:10:16Z" + CreateAt string // 创建时间 + Url string // 下载链接 } func (f File) GetHash() utils.HashInfo { @@ -33,11 +195,13 @@ func (f File) GetName() string { } func (f File) ModTime() time.Time { - return f.UpdateAt + t, _ := time.Parse(time.RFC3339, f.CreateAt) + return t } func (f File) CreateTime() time.Time { - return f.CreateAt + t, _ := time.Parse(time.RFC3339, f.CreateAt) + return t } func (f File) IsDir() bool { @@ -47,22 +211,3 @@ func (f File) IsDir() bool { func (f File) GetID() string { return f.Url } - -func (f File) Thumb() string { - return "" -} - -type ReleasesData struct { - Files []File `json:"files"` - Size int64 `json:"size"` - UpdateAt time.Time `json:"chtime"` 
- CreateAt time.Time `json:"time"` - Url string `json:"url"` -} - -type Release struct { - Path string // 挂载路径 - RepoName string // 仓库名称 - Version string // 版本号, tag - ID string // 版本ID -} diff --git a/drivers/github_releases/util.go b/drivers/github_releases/util.go index b2d79c0b..df846e8a 100644 --- a/drivers/github_releases/util.go +++ b/drivers/github_releases/util.go @@ -2,28 +2,36 @@ package github_releases import ( "fmt" - "regexp" + "path/filepath" "strings" - "sync" - "time" "github.com/alist-org/alist/v3/drivers/base" "github.com/go-resty/resty/v2" - jsoniter "github.com/json-iterator/go" log "github.com/sirupsen/logrus" ) -var ( - cache = make(map[string]*resty.Response) - created = make(map[string]time.Time) - mu sync.Mutex - req *resty.Request -) +// 发送 GET 请求 +func (d *GithubReleases) GetRequest(url string) (*resty.Response, error) { + req := base.RestyClient.R() + req.SetHeader("Accept", "application/vnd.github+json") + req.SetHeader("X-GitHub-Api-Version", "2022-11-28") + if d.Addition.Token != "" { + req.SetHeader("Authorization", fmt.Sprintf("Bearer %s", d.Addition.Token)) + } + res, err := req.Get(url) + if err != nil { + return nil, err + } + if res.StatusCode() != 200 { + log.Warn("failed to get request: ", res.StatusCode(), res.String()) + } + return res, nil +} -// 解析仓库列表 -func ParseRepos(text string, allVersion bool) ([]Release, error) { +// 解析挂载结构 +func (d *GithubReleases) ParseRepos(text string) ([]MountPoint, error) { lines := strings.Split(text, "\n") - var repos []Release + points := make([]MountPoint, 0) for _, line := range lines { line = strings.TrimSpace(line) if line == "" { @@ -41,177 +49,37 @@ func ParseRepos(text string, allVersion bool) ([]Release, error) { return nil, fmt.Errorf("invalid format: %s", line) } - if allVersion { - releases, _ := GetAllVersion(repo, path) - repos = append(repos, *releases...) 
- } else { - repos = append(repos, Release{ - Path: path, - RepoName: repo, - Version: "latest", - ID: "latest", - }) - } - + points = append(points, MountPoint{ + Point: path, + Repo: repo, + Release: nil, + Releases: nil, + }) } - return repos, nil + d.points = points + return points, nil } // 获取下一级目录 func GetNextDir(wholePath string, basePath string) string { - if !strings.HasSuffix(basePath, "/") { - basePath += "/" - } + basePath = fmt.Sprintf("%s/", strings.TrimRight(basePath, "/")) if !strings.HasPrefix(wholePath, basePath) { return "" } remainingPath := strings.TrimLeft(strings.TrimPrefix(wholePath, basePath), "/") if remainingPath != "" { parts := strings.Split(remainingPath, "/") - return parts[0] + nextDir := parts[0] + if strings.HasPrefix(wholePath, strings.TrimRight(basePath, "/")+"/"+nextDir) { + return nextDir + } } return "" } -// 发送 GET 请求 -func GetRequest(url string, cacheExpiration int) (*resty.Response, error) { - mu.Lock() - if res, ok := cache[url]; ok && time.Now().Before(created[url].Add(time.Duration(cacheExpiration)*time.Minute)) { - mu.Unlock() - return res, nil - } - mu.Unlock() - - res, err := req.Get(url) - if err != nil { - return nil, err - } - if res.StatusCode() != 200 { - log.Warn("failed to get request: ", res.StatusCode(), res.String()) - } - - mu.Lock() - cache[url] = res - created[url] = time.Now() - mu.Unlock() - - return res, nil -} - -// 获取 README、LICENSE 等文件 -func GetGithubOtherFile(repo string, basePath string, cacheExpiration int) (*[]File, error) { - url := fmt.Sprintf("https://api.github.com/repos/%s/contents/", strings.Trim(repo, "/")) - res, _ := GetRequest(url, cacheExpiration) - body := jsoniter.Get(res.Body()) - var files []File - for i := 0; i < body.Size(); i++ { - filename := body.Get(i, "name").ToString() - - re := regexp.MustCompile(`(?i)^(.*\.md|LICENSE)$`) - - if !re.MatchString(filename) { - continue - } - - files = append(files, File{ - FileName: filename, - Size: body.Get(i, "size").ToInt64(), - 
CreateAt: time.Time{}, - UpdateAt: time.Now(), - Url: body.Get(i, "download_url").ToString(), - Type: body.Get(i, "type").ToString(), - Path: fmt.Sprintf("%s/%s", basePath, filename), - }) - } - return &files, nil -} - -// 获取 GitHub Release 详细信息 -func GetRepoReleaseInfo(repo string, version string, basePath string, cacheExpiration int) (*ReleasesData, error) { - url := fmt.Sprintf("https://api.github.com/repos/%s/releases/%s", strings.Trim(repo, "/"), version) - res, _ := GetRequest(url, cacheExpiration) - body := res.Body() - - if jsoniter.Get(res.Body(), "status").ToInt64() != 0 { - return &ReleasesData{}, fmt.Errorf("%s", res.String()) - } - - assets := jsoniter.Get(res.Body(), "assets") - var files []File - - for i := 0; i < assets.Size(); i++ { - filename := assets.Get(i, "name").ToString() - - files = append(files, File{ - FileName: filename, - Size: assets.Get(i, "size").ToInt64(), - Url: assets.Get(i, "browser_download_url").ToString(), - Type: assets.Get(i, "content_type").ToString(), - Path: fmt.Sprintf("%s/%s", basePath, filename), - - CreateAt: func() time.Time { - t, _ := time.Parse(time.RFC3339, assets.Get(i, "created_at").ToString()) - return t - }(), - UpdateAt: func() time.Time { - t, _ := time.Parse(time.RFC3339, assets.Get(i, "updated_at").ToString()) - return t - }(), - }) - } - - return &ReleasesData{ - Files: files, - Url: jsoniter.Get(body, "html_url").ToString(), - - Size: func() int64 { - size := int64(0) - for _, file := range files { - size += file.Size - } - return size - }(), - UpdateAt: func() time.Time { - t, _ := time.Parse(time.RFC3339, jsoniter.Get(body, "published_at").ToString()) - return t - }(), - CreateAt: func() time.Time { - t, _ := time.Parse(time.RFC3339, jsoniter.Get(body, "created_at").ToString()) - return t - }(), - }, nil -} - -// 获取所有的版本号 -func GetAllVersion(repo string, path string) (*[]Release, error) { - url := fmt.Sprintf("https://api.github.com/repos/%s/releases", strings.Trim(repo, "/")) - res, _ := 
GetRequest(url, 0) - body := jsoniter.Get(res.Body()) - releases := make([]Release, 0) - for i := 0; i < body.Size(); i++ { - version := body.Get(i, "tag_name").ToString() - releases = append(releases, Release{ - Path: fmt.Sprintf("%s/%s", path, version), - Version: version, - RepoName: repo, - ID: body.Get(i, "id").ToString(), - }) - } - return &releases, nil -} - -func ClearCache() { - mu.Lock() - cache = make(map[string]*resty.Response) - created = make(map[string]time.Time) - mu.Unlock() -} - -func SetHeader(token string) { - req = base.RestyClient.R() - if token != "" { - req.SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)) - } - req.SetHeader("Accept", "application/vnd.github+json") - req.SetHeader("X-GitHub-Api-Version", "2022-11-28") +// 判断当前目录是否是目标目录的祖先目录 +func IsAncestorDir(parentDir string, targetDir string) bool { + absTargetDir, _ := filepath.Abs(targetDir) + absParentDir, _ := filepath.Abs(parentDir) + return strings.HasPrefix(absTargetDir, absParentDir) } From d983a4ebcb481eb3fef9080c1262b8e64997220b Mon Sep 17 00:00:00 2001 From: "Feng.YJ" <32027253+huiyifyj@users.noreply.github.com> Date: Sun, 9 Feb 2025 18:30:56 +0800 Subject: [PATCH 110/187] refactor(cmd): use std `runtime` package to get go version info (#7964) * refactor(cmd): use std `runtime` package to get go version info - Remove the `GoVersion` variable. - Remove overriding `GoVersion` by ldflags in `build.sh`. - Get go version, OS and arch from the constants in the std `runtime` package instead of compile time. * chore(ci): remove `GoVersion` flag from workflows Remove GoVersion flag from beta_release.yml and build.yml workflows. > Reduce compile-time dependencies. 
--- .github/workflows/beta_release.yml | 1 - .github/workflows/build.yml | 1 - build.sh | 2 -- cmd/version.go | 6 ++++-- internal/conf/var.go | 1 - 5 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.github/workflows/beta_release.yml b/.github/workflows/beta_release.yml index 3c52b4c4..485942c4 100644 --- a/.github/workflows/beta_release.yml +++ b/.github/workflows/beta_release.yml @@ -94,7 +94,6 @@ jobs: out-dir: build x-flags: | github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at - github.com/alist-org/alist/v3/internal/conf.GoVersion=$go_version github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit github.com/alist-org/alist/v3/internal/conf.Version=$tag diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fe037f43..a2c934e7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -49,7 +49,6 @@ jobs: out-dir: build x-flags: | github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at - github.com/alist-org/alist/v3/internal/conf.GoVersion=$go_version github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit github.com/alist-org/alist/v3/internal/conf.Version=$tag diff --git a/build.sh b/build.sh index d6e001c2..2dee8e20 100644 --- a/build.sh +++ b/build.sh @@ -1,6 +1,5 @@ appName="alist" builtAt="$(date +'%F %T %z')" -goVersion=$(go version | sed 's/go version //') gitAuthor="Xhofe " gitCommit=$(git log --pretty=format:"%h" -1) @@ -22,7 +21,6 @@ echo "frontend version: $webVersion" ldflags="\ -w -s \ -X 'github.com/alist-org/alist/v3/internal/conf.BuiltAt=$builtAt' \ --X 'github.com/alist-org/alist/v3/internal/conf.GoVersion=$goVersion' \ -X 'github.com/alist-org/alist/v3/internal/conf.GitAuthor=$gitAuthor' \ -X 'github.com/alist-org/alist/v3/internal/conf.GitCommit=$gitCommit' \ -X 'github.com/alist-org/alist/v3/internal/conf.Version=$version' \ diff 
--git a/cmd/version.go b/cmd/version.go index cdf4d71f..a758816e 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -6,6 +6,7 @@ package cmd import ( "fmt" "os" + "runtime" "github.com/alist-org/alist/v3/internal/conf" "github.com/spf13/cobra" @@ -16,14 +17,15 @@ var VersionCmd = &cobra.Command{ Use: "version", Short: "Show current version of AList", Run: func(cmd *cobra.Command, args []string) { + goVersion := fmt.Sprintf("%s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Printf(`Built At: %s Go Version: %s Author: %s Commit ID: %s Version: %s WebVersion: %s -`, - conf.BuiltAt, conf.GoVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion) +`, conf.BuiltAt, goVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion) os.Exit(0) }, } diff --git a/internal/conf/var.go b/internal/conf/var.go index 0a8eb16f..7ae1a5ab 100644 --- a/internal/conf/var.go +++ b/internal/conf/var.go @@ -7,7 +7,6 @@ import ( var ( BuiltAt string - GoVersion string GitAuthor string GitCommit string Version string = "dev" From 0219c4e15a452cf9a3c881dd4e49ed654e17a772 Mon Sep 17 00:00:00 2001 From: Jealous Date: Sun, 9 Feb 2025 18:31:43 +0800 Subject: [PATCH 111/187] fix(index): fix the issue where ignored paths are not updated (#7907) --- internal/search/util.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/internal/search/util.go b/internal/search/util.go index 8d03b740..2e6ac8da 100644 --- a/internal/search/util.go +++ b/internal/search/util.go @@ -38,7 +38,7 @@ func WriteProgress(progress *model.IndexProgress) { } } -func updateIgnorePaths() { +func updateIgnorePaths(customIgnorePaths string) { storages := op.GetAllStorages() ignorePaths := make([]string, 0) var skipDrivers = []string{"AList V2", "AList V3", "Virtual"} @@ -66,7 +66,6 @@ func updateIgnorePaths() { } } } - customIgnorePaths := setting.GetStr(conf.IgnorePaths) if customIgnorePaths != "" { ignorePaths = append(ignorePaths, strings.Split(customIgnorePaths, 
"\n")...) } @@ -84,13 +83,13 @@ func isIgnorePath(path string) bool { func init() { op.RegisterSettingItemHook(conf.IgnorePaths, func(item *model.SettingItem) error { - updateIgnorePaths() + updateIgnorePaths(item.Value) return nil }) op.RegisterStorageHook(func(typ string, storage driver.Driver) { var skipDrivers = []string{"AList V2", "AList V3", "Virtual"} if utils.SliceContains(skipDrivers, storage.Config().Name) { - updateIgnorePaths() + updateIgnorePaths(setting.GetStr(conf.IgnorePaths)) } }) } From b9ad18bd0a668a9f2839f480d24022e3ca5cf0aa Mon Sep 17 00:00:00 2001 From: Jealous Date: Sun, 9 Feb 2025 18:32:57 +0800 Subject: [PATCH 112/187] feat(recursive-move): Advanced conflict policy for preventing unintentional overwriting (#7906) --- server/common/common.go | 26 +++++++++++++++++--------- server/handles/const.go | 7 +++++++ server/handles/fsbatch.go | 22 +++++++++++++--------- 3 files changed, 37 insertions(+), 18 deletions(-) create mode 100644 server/handles/const.go diff --git a/server/common/common.go b/server/common/common.go index e231ffe6..33ae704e 100644 --- a/server/common/common.go +++ b/server/common/common.go @@ -68,21 +68,29 @@ func ErrorStrResp(c *gin.Context, str string, code int, l ...bool) { } func SuccessResp(c *gin.Context, data ...interface{}) { - if len(data) == 0 { - c.JSON(200, Resp[interface{}]{ - Code: 200, - Message: "success", - Data: nil, - }) - return + SuccessWithMsgResp(c, "success", data...) 
+} + +func SuccessWithMsgResp(c *gin.Context, msg string, data ...interface{}) { + var respData interface{} + if len(data) > 0 { + respData = data[0] } + c.JSON(200, Resp[interface{}]{ Code: 200, - Message: "success", - Data: data[0], + Message: msg, + Data: respData, }) } +func Pluralize(count int, singular, plural string) string { + if count == 1 { + return singular + } + return plural +} + func GetHttpReq(ctx context.Context) *http.Request { if c, ok := ctx.(*gin.Context); ok { return c.Request diff --git a/server/handles/const.go b/server/handles/const.go new file mode 100644 index 00000000..b108c9da --- /dev/null +++ b/server/handles/const.go @@ -0,0 +1,7 @@ +package handles + +const ( + CANCEL = "cancel" + OVERWRITE = "overwrite" + SKIP = "skip" +) diff --git a/server/handles/fsbatch.go b/server/handles/fsbatch.go index dd7b7e47..3841bff5 100644 --- a/server/handles/fsbatch.go +++ b/server/handles/fsbatch.go @@ -16,9 +16,9 @@ import ( ) type RecursiveMoveReq struct { - SrcDir string `json:"src_dir"` - DstDir string `json:"dst_dir"` - Overwrite bool `json:"overwrite"` + SrcDir string `json:"src_dir"` + DstDir string `json:"dst_dir"` + ConflictPolicy string `json:"conflict_policy"` } func FsRecursiveMove(c *gin.Context) { @@ -60,7 +60,7 @@ func FsRecursiveMove(c *gin.Context) { } var existingFileNames []string - if !req.Overwrite { + if req.ConflictPolicy != OVERWRITE { dstFiles, err := fs.List(c, dstDir, &fs.ListArgs{}) if err != nil { common.ErrorResp(c, err, 500) @@ -99,25 +99,28 @@ func FsRecursiveMove(c *gin.Context) { filePathMap[subFile] = subFilePath } } else { - if movingFilePath == dstDir { // same directory, don't move continue } - if !req.Overwrite { - if slices.Contains(existingFileNames, movingFile.GetName()) { + if slices.Contains(existingFileNames, movingFile.GetName()) { + if req.ConflictPolicy == CANCEL { common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", movingFile.GetName()), 403) return + } else if req.ConflictPolicy == SKIP { + 
continue } + } else if req.ConflictPolicy != OVERWRITE { existingFileNames = append(existingFileNames, movingFile.GetName()) } - movingFileNames = append(movingFileNames, movingFileName) + } } + var count = 0 for i, fileName := range movingFileNames { // move err := fs.Move(c, fileName, dstDir, len(movingFileNames) > i+1) @@ -125,9 +128,10 @@ func FsRecursiveMove(c *gin.Context) { common.ErrorResp(c, err, 500) return } + count++ } - common.SuccessResp(c) + common.SuccessWithMsgResp(c, fmt.Sprintf("Successfully moved %d %s", count, common.Pluralize(count, "file", "files"))) } type BatchRenameReq struct { From 3f9bed3d5f54f559807af8190a9313962aebf982 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sun, 9 Feb 2025 18:33:38 +0800 Subject: [PATCH 113/187] feat(bootstrap): add `.url` to proxy types (#7928) --- internal/bootstrap/data/setting.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/bootstrap/data/setting.go b/internal/bootstrap/data/setting.go index bcb64f79..5e8a2be4 100644 --- a/internal/bootstrap/data/setting.go +++ b/internal/bootstrap/data/setting.go @@ -114,7 +114,7 @@ func InitialSettings() []model.SettingItem { {Key: conf.VideoTypes, Value: "mp4,mkv,avi,mov,rmvb,webm,flv,m3u8", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, {Key: conf.ImageTypes, Value: "jpg,tiff,jpeg,png,gif,bmp,svg,ico,swf,webp", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, //{Key: conf.OfficeTypes, Value: "doc,docx,xls,xlsx,ppt,pptx", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, - {Key: conf.ProxyTypes, Value: "m3u8", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, + {Key: conf.ProxyTypes, Value: "m3u8,url", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, {Key: conf.ProxyIgnoreHeaders, Value: "authorization,referer", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, {Key: "external_previews", Value: `{}`, Type: conf.TypeText, Group: model.PREVIEW}, 
{Key: "iframe_previews", Value: `{ From ec3fc945a35d08357a1109c54f7adf46b6cc379c Mon Sep 17 00:00:00 2001 From: Sakana Date: Sun, 9 Feb 2025 18:35:39 +0800 Subject: [PATCH 114/187] fix(feiji): modify the request header (#7902 close #7890) --- drivers/ilanzou/util.go | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/ilanzou/util.go b/drivers/ilanzou/util.go index b8fd5280..ea942795 100644 --- a/drivers/ilanzou/util.go +++ b/drivers/ilanzou/util.go @@ -73,6 +73,7 @@ func (d *ILanZou) request(pathname, method string, callback base.ReqCallback, pr "Referer": d.conf.site + "/", "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0", "Accept-Encoding": "gzip, deflate, br, zstd", + "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,mt;q=0.5", }) if callback != nil { From f25be154c692eeffcde9fe3589848f6af48465f5 Mon Sep 17 00:00:00 2001 From: YangRucheng Date: Sun, 16 Feb 2025 12:20:28 +0800 Subject: [PATCH 115/187] fix(ilanzou): add header `X-Forwarded-For` to solve IP ban (#7977) * fix: warning * feat: ip header * fix: ip header for fs link --- drivers/ilanzou/driver.go | 17 ++++++++++++----- drivers/ilanzou/meta.go | 1 + drivers/ilanzou/util.go | 4 ++++ 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/drivers/ilanzou/driver.go b/drivers/ilanzou/driver.go index 8681fed4..22d1589f 100644 --- a/drivers/ilanzou/driver.go +++ b/drivers/ilanzou/driver.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "encoding/hex" "fmt" - "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" "net/url" @@ -14,6 +13,8 @@ import ( "strings" "time" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -121,7 +122,7 @@ func (d *ILanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs) if err != nil { return 
nil, err } - ts, ts_str, err := getTimestamp(d.conf.secret) + ts, ts_str, _ := getTimestamp(d.conf.secret) params := []string{ "uuid=" + url.QueryEscape(d.UUID), @@ -150,11 +151,17 @@ func (d *ILanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs) u.RawQuery = strings.Join(params, "&") realURL := u.String() // get the url after redirect - res, err := base.NoRedirectClient.R().SetHeaders(map[string]string{ - //"Origin": d.conf.site, + req := base.NoRedirectClient.R() + + req.SetHeaders(map[string]string{ "Referer": d.conf.site + "/", "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0", - }).Get(realURL) + }) + if d.Addition.Ip != "" { + req.SetHeader("X-Forwarded-For", d.Addition.Ip) + } + + res, err := req.Get(realURL) if err != nil { return nil, err } diff --git a/drivers/ilanzou/meta.go b/drivers/ilanzou/meta.go index f15fc01a..7a4a00fb 100644 --- a/drivers/ilanzou/meta.go +++ b/drivers/ilanzou/meta.go @@ -9,6 +9,7 @@ type Addition struct { driver.RootID Username string `json:"username" type:"string" required:"true"` Password string `json:"password" type:"string" required:"true"` + Ip string `json:"ip" type:"string"` Token string UUID string diff --git a/drivers/ilanzou/util.go b/drivers/ilanzou/util.go index ea942795..81773afb 100644 --- a/drivers/ilanzou/util.go +++ b/drivers/ilanzou/util.go @@ -76,6 +76,10 @@ func (d *ILanZou) request(pathname, method string, callback base.ReqCallback, pr "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,mt;q=0.5", }) + if d.Addition.Ip != "" { + req.SetHeader("X-Forwarded-For", d.Addition.Ip) + } + if callback != nil { callback(req) } From 36b42046230cdc078744302eb47937eefb863ee5 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Sun, 16 Feb 2025 12:21:03 +0800 Subject: [PATCH 116/187] feat(github): support github proxy (#7979 close #7963) --- drivers/github/driver.go | 17 
++++++++++++++--- drivers/github/meta.go | 3 ++- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/github/driver.go b/drivers/github/driver.go index 996c79c7..dee4cbbf 100644 --- a/drivers/github/driver.go +++ b/drivers/github/driver.go @@ -85,10 +85,13 @@ func (d *Github) Init(ctx context.Context) error { } d.client = base.NewRestyClient(). SetHeader("Accept", "application/vnd.github.object+json"). - SetHeader("Authorization", "Bearer "+d.Token). SetHeader("X-GitHub-Api-Version", "2022-11-28"). SetLogger(log.StandardLogger()). SetDebug(false) + token := strings.TrimSpace(d.Token) + if token != "" { + d.client = d.client.SetHeader("Authorization", "Bearer "+token) + } if d.Ref == "" { repo, err := d.getRepo() if err != nil { @@ -149,8 +152,13 @@ func (d *Github) Link(ctx context.Context, file model.Obj, args model.LinkArgs) if obj.Type == "submodule" { return nil, errors.New("cannot download a submodule") } + url := obj.DownloadURL + ghProxy := strings.TrimSpace(d.Addition.GitHubProxy) + if ghProxy != "" { + url = strings.Replace(url, "https://raw.githubusercontent.com", ghProxy, 1) + } return &model.Link{ - URL: obj.DownloadURL, + URL: url, }, nil } @@ -679,8 +687,11 @@ func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.Up return "", err } req.Header.Set("Accept", "application/vnd.github+json") - req.Header.Set("Authorization", "Bearer "+d.Token) req.Header.Set("X-GitHub-Api-Version", "2022-11-28") + token := strings.TrimSpace(d.Token) + if token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } req.ContentLength = length res, err := base.HttpClient.Do(req) diff --git a/drivers/github/meta.go b/drivers/github/meta.go index 0df4aa60..05e704be 100644 --- a/drivers/github/meta.go +++ b/drivers/github/meta.go @@ -7,10 +7,11 @@ import ( type Addition struct { driver.RootPath - Token string `json:"token" type:"string" required:"true"` + Token string `json:"token" type:"string"` Owner string `json:"owner" 
type:"string" required:"true"` Repo string `json:"repo" type:"string" required:"true"` Ref string `json:"ref" type:"string" help:"A branch, a tag or a commit SHA, main branch by default."` + GitHubProxy string `json:"gh_proxy" type:"string" help:"GitHub proxy, e.g. https://ghproxy.net/raw.githubusercontent.com or https://gh-proxy.com/raw.githubusercontent.com"` CommitterName string `json:"committer_name" type:"string"` CommitterEmail string `json:"committer_email" type:"string"` AuthorName string `json:"author_name" type:"string"` From 399336b33c344768109596cf5d088270d3e1f522 Mon Sep 17 00:00:00 2001 From: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Sun, 16 Feb 2025 12:21:34 +0800 Subject: [PATCH 117/187] fix(189pc): transfer rename (#7958) * fix(189pc): transfer rename * fix: OverwriteUpload * fix: change search method * fix * fix --- drivers/189pc/driver.go | 73 ++++++++----- drivers/189pc/help.go | 10 ++ drivers/189pc/utils.go | 220 +++++++++++++++++++++++----------------- pkg/utils/time.go | 25 +++-- 4 files changed, 201 insertions(+), 127 deletions(-) diff --git a/drivers/189pc/driver.go b/drivers/189pc/driver.go index 6b502de0..c91caf2f 100644 --- a/drivers/189pc/driver.go +++ b/drivers/189pc/driver.go @@ -1,8 +1,8 @@ package _189pc import ( - "container/ring" "context" + "fmt" "net/http" "strconv" "strings" @@ -14,6 +14,7 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" + "github.com/google/uuid" ) type Cloud189PC struct { @@ -29,7 +30,7 @@ type Cloud189PC struct { uploadThread int - familyTransferFolder *ring.Ring + familyTransferFolder *Cloud189Folder cleanFamilyTransferFile func() storageConfig driver.Config @@ -48,9 +49,18 @@ func (y *Cloud189PC) GetAddition() driver.Additional { } func (y *Cloud189PC) Init(ctx context.Context) (err error) { - // 兼容旧上传接口 - y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || 
y.Addition.UploadMethod == "old") - + y.storageConfig = config + if y.isFamily() { + // 兼容旧上传接口 + if y.Addition.RapidUpload || y.Addition.UploadMethod == "old" { + y.storageConfig.NoOverwriteUpload = true + } + } else { + // 家庭云转存,不支持覆盖上传 + if y.Addition.FamilyTransfer { + y.storageConfig.NoOverwriteUpload = true + } + } // 处理个人云和家庭云参数 if y.isFamily() && y.RootFolderID == "-11" { y.RootFolderID = "" @@ -91,13 +101,14 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) { } } - // 创建中转文件夹,防止重名文件 + // 创建中转文件夹 if y.FamilyTransfer { - if y.familyTransferFolder, err = y.createFamilyTransferFolder(32); err != nil { + if err := y.createFamilyTransferFolder(); err != nil { return err } } + // 清理转存文件节流 y.cleanFamilyTransferFile = utils.NewThrottle2(time.Minute, func() { if err := y.cleanFamilyTransfer(context.TODO()); err != nil { utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err) @@ -327,35 +338,49 @@ func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.Fil if !isFamily && y.FamilyTransfer { // 修改上传目标为家庭云文件夹 transferDstDir := dstDir - dstDir = (y.familyTransferFolder.Value).(*Cloud189Folder) - y.familyTransferFolder = y.familyTransferFolder.Next() + dstDir = y.familyTransferFolder + // 使用临时文件名 + srcName := stream.GetName() + stream = &WrapFileStreamer{ + FileStreamer: stream, + Name: fmt.Sprintf("0%s.transfer", uuid.NewString()), + } + + // 使用家庭云上传 isFamily = true overwrite = false defer func() { if newObj != nil { - // 批量任务有概率删不掉 - y.cleanFamilyTransferFile() - // 转存家庭云文件到个人云 err = y.SaveFamilyFileToPersonCloud(context.TODO(), y.FamilyID, newObj, transferDstDir, true) - - task := BatchTaskInfo{ - FileId: newObj.GetID(), - FileName: newObj.GetName(), - IsFolder: BoolToNumber(newObj.IsDir()), + // 删除家庭云源文件 + go y.Delete(context.TODO(), y.FamilyID, newObj) + // 批量任务有概率删不掉 + go y.cleanFamilyTransferFile() + // 转存失败返回错误 + if err != nil { + return } - // 删除源文件 - if resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, task); err 
== nil { - y.WaitBatchTask("DELETE", resp.TaskID, time.Second) - // 永久删除 - if resp, err := y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, task); err == nil { - y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second) + // 查找转存文件 + var file *Cloud189File + file, err = y.findFileByName(context.TODO(), newObj.GetName(), transferDstDir.GetID(), false) + if err != nil { + if err == errs.ObjectNotFound { + err = fmt.Errorf("unknown error: No transfer file obtained %s", newObj.GetName()) } + return } - newObj = nil + + // 重命名转存文件 + newObj, err = y.Rename(context.TODO(), file, srcName) + if err != nil { + // 重命名失败删除源文件 + _ = y.Delete(context.TODO(), "", file) + } + return } }() } diff --git a/drivers/189pc/help.go b/drivers/189pc/help.go index 49f957fa..bac8880a 100644 --- a/drivers/189pc/help.go +++ b/drivers/189pc/help.go @@ -18,6 +18,7 @@ import ( "strings" "time" + "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils/random" ) @@ -208,3 +209,12 @@ func IF[V any](o bool, t V, f V) V { } return f } + +type WrapFileStreamer struct { + model.FileStreamer + Name string +} + +func (w *WrapFileStreamer) GetName() string { + return w.Name +} diff --git a/drivers/189pc/utils.go b/drivers/189pc/utils.go index 0c3e5404..6f3c4dcf 100644 --- a/drivers/189pc/utils.go +++ b/drivers/189pc/utils.go @@ -2,7 +2,6 @@ package _189pc import ( "bytes" - "container/ring" "context" "crypto/md5" "encoding/base64" @@ -23,6 +22,7 @@ import ( "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/setting" @@ -185,39 +185,9 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str return body, nil } func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, 
isFamily bool) ([]model.Obj, error) { - fullUrl := API_URL - if isFamily { - fullUrl += "/family/file" - } - fullUrl += "/listFiles.action" - - res := make([]model.Obj, 0, 130) + res := make([]model.Obj, 0, 100) for pageNum := 1; ; pageNum++ { - var resp Cloud189FilesResp - _, err := y.get(fullUrl, func(r *resty.Request) { - r.SetContext(ctx) - r.SetQueryParams(map[string]string{ - "folderId": fileId, - "fileType": "0", - "mediaAttr": "0", - "iconOption": "5", - "pageNum": fmt.Sprint(pageNum), - "pageSize": "130", - }) - if isFamily { - r.SetQueryParams(map[string]string{ - "familyId": y.FamilyID, - "orderBy": toFamilyOrderBy(y.OrderBy), - "descending": toDesc(y.OrderDirection), - }) - } else { - r.SetQueryParams(map[string]string{ - "recursive": "0", - "orderBy": y.OrderBy, - "descending": toDesc(y.OrderDirection), - }) - } - }, &resp, isFamily) + resp, err := y.getFilesWithPage(ctx, fileId, isFamily, pageNum, 1000, y.OrderBy, y.OrderDirection) if err != nil { return nil, err } @@ -236,6 +206,63 @@ func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) return res, nil } +func (y *Cloud189PC) getFilesWithPage(ctx context.Context, fileId string, isFamily bool, pageNum int, pageSize int, orderBy string, orderDirection string) (*Cloud189FilesResp, error) { + fullUrl := API_URL + if isFamily { + fullUrl += "/family/file" + } + fullUrl += "/listFiles.action" + + var resp Cloud189FilesResp + _, err := y.get(fullUrl, func(r *resty.Request) { + r.SetContext(ctx) + r.SetQueryParams(map[string]string{ + "folderId": fileId, + "fileType": "0", + "mediaAttr": "0", + "iconOption": "5", + "pageNum": fmt.Sprint(pageNum), + "pageSize": fmt.Sprint(pageSize), + }) + if isFamily { + r.SetQueryParams(map[string]string{ + "familyId": y.FamilyID, + "orderBy": toFamilyOrderBy(orderBy), + "descending": toDesc(orderDirection), + }) + } else { + r.SetQueryParams(map[string]string{ + "recursive": "0", + "orderBy": orderBy, + "descending": toDesc(orderDirection), + }) 
+ } + }, &resp, isFamily) + if err != nil { + return nil, err + } + return &resp, nil +} + +func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, folderId string, isFamily bool) (*Cloud189File, error) { + for pageNum := 1; ; pageNum++ { + resp, err := y.getFilesWithPage(ctx, folderId, isFamily, pageNum, 10, "filename", "asc") + if err != nil { + return nil, err + } + // 获取完毕跳出 + if resp.FileListAO.Count == 0 { + return nil, errs.ObjectNotFound + } + for i := 0; i < len(resp.FileListAO.FileList); i++ { + file := resp.FileListAO.FileList[i] + if file.Name == searchName { + return &file, nil + } + } + } +} + func (y *Cloud189PC) login() (err error) { // 初始化登陆所需参数 if y.loginParam == nil { @@ -902,8 +929,7 @@ func (y *Cloud189PC) isLogin() bool { } // 创建家庭云中转文件夹 -func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) { - folders := ring.New(count) +func (y *Cloud189PC) createFamilyTransferFolder() error { var rootFolder Cloud189Folder _, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) { req.SetQueryParams(map[string]string{ @@ -912,81 +938,61 @@ func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) { }) }, &rootFolder, true) if err != nil { - return nil, err + return err } - - folderCount := 0 - - // 获取已有目录 - files, err := y.getFiles(context.TODO(), rootFolder.GetID(), true) - if err != nil { - return nil, err - } - for _, file := range files { - if folder, ok := file.(*Cloud189Folder); ok { - folders.Value = folder - folders = folders.Next() - folderCount++ - } - } - - // 创建新的目录 - for folderCount < count { - var newFolder Cloud189Folder - _, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) { - req.SetQueryParams(map[string]string{ - "folderName": uuid.NewString(), - "familyId": y.FamilyID, - "parentId": rootFolder.GetID(), - }) - }, &newFolder, true) - if err != nil { - return nil, err - } - folders.Value = &newFolder - folders = 
folders.Next() - folderCount++ - } - return folders, nil + y.familyTransferFolder = &rootFolder + return nil } // 清理中转文件夹 func (y *Cloud189PC) cleanFamilyTransfer(ctx context.Context) error { - var tasks []BatchTaskInfo - r := y.familyTransferFolder - for p := r.Next(); p != r; p = p.Next() { - folder := p.Value.(*Cloud189Folder) - - files, err := y.getFiles(ctx, folder.GetID(), true) + transferFolderId := y.familyTransferFolder.GetID() + for pageNum := 1; ; pageNum++ { + resp, err := y.getFilesWithPage(ctx, transferFolderId, true, pageNum, 100, "lastOpTime", "asc") if err != nil { return err } - for _, file := range files { + // 获取完毕跳出 + if resp.FileListAO.Count == 0 { + break + } + + var tasks []BatchTaskInfo + for i := 0; i < len(resp.FileListAO.FolderList); i++ { + folder := resp.FileListAO.FolderList[i] + tasks = append(tasks, BatchTaskInfo{ + FileId: folder.GetID(), + FileName: folder.GetName(), + IsFolder: BoolToNumber(folder.IsDir()), + }) + } + for i := 0; i < len(resp.FileListAO.FileList); i++ { + file := resp.FileListAO.FileList[i] tasks = append(tasks, BatchTaskInfo{ FileId: file.GetID(), FileName: file.GetName(), IsFolder: BoolToNumber(file.IsDir()), }) } - } - if len(tasks) > 0 { - // 删除 - resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...) - if err != nil { + if len(tasks) > 0 { + // 删除 + resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...) + if err != nil { + return err + } + err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second) + if err != nil { + return err + } + // 永久删除 + resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...) + if err != nil { + return err + } + err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second) return err } - err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second) - if err != nil { - return err - } - // 永久删除 - resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...) 
- if err != nil { - return err - } - err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second) - return err } return nil } @@ -1063,6 +1069,34 @@ func (y *Cloud189PC) SaveFamilyFileToPersonCloud(ctx context.Context, familyId s } } +// 永久删除文件 +func (y *Cloud189PC) Delete(ctx context.Context, familyId string, srcObj model.Obj) error { + task := BatchTaskInfo{ + FileId: srcObj.GetID(), + FileName: srcObj.GetName(), + IsFolder: BoolToNumber(srcObj.IsDir()), + } + // 删除源文件 + resp, err := y.CreateBatchTask("DELETE", familyId, "", nil, task) + if err != nil { + return err + } + err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second) + if err != nil { + return err + } + // 清除回收站 + resp, err = y.CreateBatchTask("CLEAR_RECYCLE", familyId, "", nil, task) + if err != nil { + return err + } + err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second) + if err != nil { + return err + } + return nil +} + func (y *Cloud189PC) CreateBatchTask(aType string, familyID string, targetFolderId string, other map[string]string, taskInfos ...BatchTaskInfo) (*CreateBatchTaskResp, error) { var resp CreateBatchTaskResp _, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) { diff --git a/pkg/utils/time.go b/pkg/utils/time.go index aa706928..36573b4e 100644 --- a/pkg/utils/time.go +++ b/pkg/utils/time.go @@ -34,31 +34,36 @@ func NewDebounce2(interval time.Duration, f func()) func() { if timer == nil { timer = time.AfterFunc(interval, f) } - (*time.Timer)(timer).Reset(interval) + timer.Reset(interval) } } func NewThrottle(interval time.Duration) func(func()) { var lastCall time.Time - + var lock sync.Mutex return func(fn func()) { + lock.Lock() + defer lock.Unlock() + now := time.Now() - if now.Sub(lastCall) < interval { - return + if now.Sub(lastCall) >= interval { + lastCall = now + go fn() } - time.AfterFunc(interval, fn) - lastCall = now } } func NewThrottle2(interval time.Duration, fn func()) func() { var lastCall time.Time + var lock sync.Mutex 
return func() { + lock.Lock() + defer lock.Unlock() + now := time.Now() - if now.Sub(lastCall) < interval { - return + if now.Sub(lastCall) >= interval { + lastCall = now + go fn() } - time.AfterFunc(interval, fn) - lastCall = now } } From 3b71500f237c4cb3427ce11cb6675fa9ba7006b6 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Sun, 16 Feb 2025 12:22:11 +0800 Subject: [PATCH 118/187] feat(traffic): support limit task worker count & file stream rate (#7948) * feat: set task workers num & client stream rate limit * feat: server stream rate limit * upgrade xhofe/tache * . --- cmd/common.go | 1 + drivers/115/util.go | 26 ++--- drivers/123/driver.go | 5 +- drivers/123/upload.go | 3 +- drivers/139/driver.go | 6 +- drivers/189/util.go | 6 +- drivers/189pc/utils.go | 19 +++- drivers/alist_v3/driver.go | 4 +- drivers/aliyundrive/driver.go | 13 ++- drivers/aliyundrive_open/upload.go | 7 +- drivers/baidu_netdisk/driver.go | 10 +- drivers/baidu_photo/driver.go | 10 +- drivers/base/client.go | 13 +-- drivers/chaoxing/driver.go | 7 +- drivers/cloudreve/driver.go | 6 +- drivers/cloudreve/util.go | 13 +-- drivers/dropbox/driver.go | 9 +- drivers/ftp/driver.go | 12 +-- drivers/github/driver.go | 8 +- drivers/google_drive/driver.go | 3 +- drivers/google_drive/util.go | 8 +- drivers/google_photo/driver.go | 2 +- drivers/halalcloud/driver.go | 3 +- drivers/ilanzou/driver.go | 6 +- drivers/ipfs_api/driver.go | 12 +-- drivers/kodbox/driver.go | 9 +- drivers/lanzou/driver.go | 10 +- drivers/lark/driver.go | 12 ++- drivers/mediatrack/driver.go | 7 +- drivers/mega/driver.go | 3 +- drivers/misskey/driver.go | 2 +- drivers/misskey/util.go | 18 ++-- drivers/mopan/driver.go | 12 ++- drivers/netease_music/types.go | 13 --- drivers/netease_music/util.go | 14 +-- drivers/onedrive/util.go | 10 +- drivers/onedrive_app/util.go | 10 +- drivers/pikpak/util.go | 19 ++-- drivers/quark_uc/driver.go | 14 +-- drivers/quark_uc/util.go | 9 +- drivers/quqi/driver.go | 14 ++- 
drivers/s3/driver.go | 11 +-- drivers/seafile/driver.go | 5 +- drivers/sftp/driver.go | 2 +- drivers/smb/driver.go | 2 +- drivers/teambition/driver.go | 2 +- drivers/teambition/util.go | 22 +++-- drivers/terabox/driver.go | 2 +- drivers/thunder/driver.go | 5 +- drivers/thunder_browser/driver.go | 2 +- drivers/thunderx/driver.go | 5 +- drivers/trainbit/driver.go | 7 +- drivers/url_tree/driver.go | 2 +- drivers/uss/driver.go | 11 +-- drivers/vtencent/util.go | 3 +- drivers/webdav/driver.go | 13 +-- drivers/weiyun/driver.go | 7 +- drivers/wopan/driver.go | 2 +- drivers/yandex_disk/driver.go | 4 +- go.mod | 6 +- go.sum | 8 +- internal/bootstrap/data/setting.go | 17 +++- internal/bootstrap/stream_limit.go | 53 ++++++++++ internal/bootstrap/task.go | 39 ++++++-- internal/conf/const.go | 12 +++ internal/driver/driver.go | 68 ++++++++----- internal/driver/utils.go | 62 ++++++++++++ internal/model/setting.go | 1 + internal/net/serve.go | 14 ++- internal/op/setting.go | 9 ++ internal/stream/limit.go | 152 +++++++++++++++++++++++++++++ internal/stream/stream.go | 60 +++++++----- internal/stream/util.go | 20 ++++ server/common/proxy.go | 27 ++++- server/ftp/fsread.go | 7 +- server/ftp/fsup.go | 19 +++- server/middlewares/limit.go | 36 +++++++ server/router.go | 17 ++-- server/webdav.go | 8 +- 79 files changed, 803 insertions(+), 327 deletions(-) create mode 100644 internal/bootstrap/stream_limit.go create mode 100644 internal/driver/utils.go create mode 100644 internal/stream/limit.go diff --git a/cmd/common.go b/cmd/common.go index 47a25f3f..8a73f9b0 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -17,6 +17,7 @@ func Init() { bootstrap.Log() bootstrap.InitDB() data.InitData() + bootstrap.InitStreamLimit() bootstrap.InitIndex() bootstrap.InitUpgradePatch() } diff --git a/drivers/115/util.go b/drivers/115/util.go index 4d3cdd93..7298f565 100644 --- a/drivers/115/util.go +++ b/drivers/115/util.go @@ -8,8 +8,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - 
"github.com/alist-org/alist/v3/internal/driver" - "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" "net/url" @@ -20,6 +18,7 @@ import ( "time" "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" @@ -144,7 +143,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e return nil, err } - bytes, err := crypto.Decode(string(result.EncodedData), key) + b, err := crypto.Decode(string(result.EncodedData), key) if err != nil { return nil, err } @@ -152,7 +151,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e downloadInfo := struct { Url string `json:"url"` }{} - if err := utils.Json.Unmarshal(bytes, &downloadInfo); err != nil { + if err := utils.Json.Unmarshal(b, &downloadInfo); err != nil { return nil, err } @@ -290,13 +289,10 @@ func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSPar } var bodyBytes []byte - r := &stream.ReaderWithCtx{ - Reader: &stream.ReaderUpdatingProgress{ - Reader: s, - UpdateProgress: up, - }, - Ctx: ctx, - } + r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) if err = bucket.PutObject(params.Object, r, append( driver115.OssOption(params, ossToken), oss.CallbackResult(&bodyBytes), @@ -405,16 +401,12 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload } default: } - buf := make([]byte, chunk.Size) if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) { continue } - - if part, err = bucket.UploadPart(imur, &stream.ReaderWithCtx{ - Reader: bytes.NewBuffer(buf), - Ctx: ctx, - }, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil { + if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, 
bytes.NewBuffer(buf)), + chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil { break } } diff --git a/drivers/123/driver.go b/drivers/123/driver.go index 1bf71ae6..7d457138 100644 --- a/drivers/123/driver.go +++ b/drivers/123/driver.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "encoding/hex" "fmt" - "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" "net/url" @@ -249,10 +248,10 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea input := &s3manager.UploadInput{ Bucket: &resp.Data.Bucket, Key: &resp.Data.Key, - Body: &stream.ReaderUpdatingProgress{ + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: file, UpdateProgress: up, - }, + }), } _, err = uploader.UploadWithContext(ctx, input) if err != nil { diff --git a/drivers/123/upload.go b/drivers/123/upload.go index a472df55..dc148c4c 100644 --- a/drivers/123/upload.go +++ b/drivers/123/upload.go @@ -81,6 +81,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi batchSize = 10 getS3UploadUrl = d.getS3PreSignedUrls } + limited := driver.NewLimitedUploadStream(ctx, file) for i := 1; i <= chunkCount; i += batchSize { if utils.IsCanceled(ctx) { return ctx.Err() @@ -103,7 +104,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi if j == chunkCount { curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize } - err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(file, chunkSize), curSize, false, getS3UploadUrl) + err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(limited, chunkSize), curSize, false, getS3UploadUrl) if err != nil { return err } diff --git a/drivers/139/driver.go b/drivers/139/driver.go index 1e2ba9c4..c6b30335 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -631,12 +631,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr // Progress p := 
driver.NewProgress(stream.GetSize(), up) + rateLimited := driver.NewLimitedUploadStream(ctx, stream) // 上传所有分片 for _, uploadPartInfo := range uploadPartInfos { index := uploadPartInfo.PartNumber - 1 partSize := partInfos[index].PartSize log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos)) - limitReader := io.LimitReader(stream, partSize) + limitReader := io.LimitReader(rateLimited, partSize) // Update Progress r := io.TeeReader(limitReader, p) @@ -787,6 +788,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr if part == 0 { part = 1 } + rateLimited := driver.NewLimitedUploadStream(ctx, stream) for i := int64(0); i < part; i++ { if utils.IsCanceled(ctx) { return ctx.Err() @@ -798,7 +800,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr byteSize = partSize } - limitReader := io.LimitReader(stream, byteSize) + limitReader := io.LimitReader(rateLimited, byteSize) // Update Progress r := io.TeeReader(limitReader, p) req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r) diff --git a/drivers/189/util.go b/drivers/189/util.go index 0b4c0633..16a5aa39 100644 --- a/drivers/189/util.go +++ b/drivers/189/util.go @@ -365,7 +365,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F log.Debugf("uploadData: %+v", uploadData) requestURL := uploadData.RequestURL uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&") - req, err := http.NewRequest(http.MethodPut, requestURL, bytes.NewReader(byteData)) + req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) if err != nil { return err } @@ -375,11 +375,11 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F req.Header.Set(v[0:i], v[i+1:]) } r, err := base.HttpClient.Do(req) - log.Debugf("%+v %+v", r, r.Request.Header) - r.Body.Close() if err != nil { return err } + 
log.Debugf("%+v %+v", r, r.Request.Header) + _ = r.Body.Close() up(float64(i) * 100 / float64(count)) } fileMd5 := hex.EncodeToString(md5Sum.Sum(nil)) diff --git a/drivers/189pc/utils.go b/drivers/189pc/utils.go index 6f3c4dcf..290d2e56 100644 --- a/drivers/189pc/utils.go +++ b/drivers/189pc/utils.go @@ -19,6 +19,8 @@ import ( "strings" "time" + "golang.org/x/sync/semaphore" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/driver" @@ -174,8 +176,8 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str } var erron RespErr - jsoniter.Unmarshal(body, &erron) - xml.Unmarshal(body, &erron) + _ = jsoniter.Unmarshal(body, &erron) + _ = xml.Unmarshal(body, &erron) if erron.HasError() { return nil, &erron } @@ -508,6 +510,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo retry.Attempts(3), retry.Delay(time.Second), retry.DelayType(retry.BackOffDelay)) + sem := semaphore.NewWeighted(3) fileMd5 := md5.New() silceMd5 := md5.New() @@ -517,7 +520,9 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo if utils.IsCanceled(upCtx) { break } - + if err = sem.Acquire(ctx, 1); err != nil { + break + } byteData := make([]byte, sliceSize) if i == count { byteData = byteData[:lastPartSize] @@ -526,6 +531,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo // 读取块 silceMd5.Reset() if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil { + sem.Release(1) return nil, err } @@ -535,6 +541,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes)) threadG.Go(func(ctx context.Context) error { + defer sem.Release(1) uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo) if 
err != nil { return err @@ -542,7 +549,8 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo // step.4 上传切片 uploadUrl := uploadUrls[0] - _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData), isFamily) + _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, + driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily) if err != nil { return err } @@ -794,6 +802,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model if err != nil { return nil, err } + rateLimited := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile)) // 创建上传会话 uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily) @@ -820,7 +829,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId) } - _, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily) + _, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimited, isFamily) if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" { return nil, err } diff --git a/drivers/alist_v3/driver.go b/drivers/alist_v3/driver.go index 679285e0..5a299ea0 100644 --- a/drivers/alist_v3/driver.go +++ b/drivers/alist_v3/driver.go @@ -3,7 +3,6 @@ package alist_v3 import ( "context" "fmt" - "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" "path" @@ -183,10 +182,11 @@ func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error { } func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { - req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", &stream.ReaderUpdatingProgress{ + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: s, UpdateProgress: up, }) + req, err := 
http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", reader) if err != nil { return err } diff --git a/drivers/aliyundrive/driver.go b/drivers/aliyundrive/driver.go index 2a977aa3..105e28b2 100644 --- a/drivers/aliyundrive/driver.go +++ b/drivers/aliyundrive/driver.go @@ -14,13 +14,12 @@ import ( "os" "time" - "github.com/alist-org/alist/v3/internal/stream" - "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/cron" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" @@ -194,7 +193,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil } if d.RapidUpload { buf := bytes.NewBuffer(make([]byte, 0, 1024)) - utils.CopyWithBufferN(buf, file, 1024) + _, err := utils.CopyWithBufferN(buf, file, 1024) + if err != nil { + return err + } reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes()) if localFile != nil { if _, err := localFile.Seek(0, io.SeekStart); err != nil { @@ -286,6 +288,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil file.Reader = localFile } + rateLimited := driver.NewLimitedUploadStream(ctx, file) for i, partInfo := range resp.PartInfoList { if utils.IsCanceled(ctx) { return ctx.Err() @@ -294,7 +297,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil if d.InternalUpload { url = partInfo.InternalUploadUrl } - req, err := http.NewRequest("PUT", url, io.LimitReader(file, DEFAULT)) + req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT)) if err != nil { return err } @@ -303,7 +306,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil if err != nil { return err } - res.Body.Close() + _ = 
res.Body.Close() if count > 0 { up(float64(i) * 100 / float64(count)) } diff --git a/drivers/aliyundrive_open/upload.go b/drivers/aliyundrive_open/upload.go index 653a2442..fb730de6 100644 --- a/drivers/aliyundrive_open/upload.go +++ b/drivers/aliyundrive_open/upload.go @@ -77,7 +77,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict { return fmt.Errorf("upload status: %d", res.StatusCode) } @@ -251,8 +251,9 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m rd = utils.NewMultiReadable(srd) } err = retry.Do(func() error { - rd.Reset() - return d.uploadPart(ctx, rd, createResp.PartInfoList[i]) + _ = rd.Reset() + rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd) + return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i]) }, retry.Attempts(3), retry.DelayType(retry.BackOffDelay), diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index ad52a4b5..e0ba98fa 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -12,6 +12,8 @@ import ( "strconv" "time" + "golang.org/x/sync/semaphore" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -263,16 +265,21 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F retry.Attempts(3), retry.Delay(time.Second), retry.DelayType(retry.BackOffDelay)) + sem := semaphore.NewWeighted(3) for i, partseq := range precreateResp.BlockList { if utils.IsCanceled(upCtx) { break } + if err = sem.Acquire(ctx, 1); err != nil { + break + } i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize if partseq+1 == count { byteSize = lastBlockSize } threadG.Go(func(ctx context.Context) error { + defer sem.Release(1) params := 
map[string]string{ "method": "upload", "access_token": d.AccessToken, @@ -281,7 +288,8 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F "uploadid": precreateResp.Uploadid, "partseq": strconv.Itoa(partseq), } - err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize)) + err := d.uploadSlice(ctx, params, stream.GetName(), + driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize))) if err != nil { return err } diff --git a/drivers/baidu_photo/driver.go b/drivers/baidu_photo/driver.go index b584c9a3..9ee0a7ae 100644 --- a/drivers/baidu_photo/driver.go +++ b/drivers/baidu_photo/driver.go @@ -13,6 +13,8 @@ import ( "strings" "time" + "golang.org/x/sync/semaphore" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -314,10 +316,14 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil retry.Attempts(3), retry.Delay(time.Second), retry.DelayType(retry.BackOffDelay)) + sem := semaphore.NewWeighted(3) for i, partseq := range precreateResp.BlockList { if utils.IsCanceled(upCtx) { break } + if err = sem.Acquire(ctx, 1); err != nil { + break + } i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT if partseq+1 == count { @@ -325,6 +331,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil } threadG.Go(func(ctx context.Context) error { + defer sem.Release(1) uploadParams := map[string]string{ "method": "upload", "path": params["path"], @@ -335,7 +342,8 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil _, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) { r.SetContext(ctx) r.SetQueryParams(uploadParams) - r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize)) + r.SetFileReader("file", 
stream.GetName(), + driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize))) }, nil) if err != nil { return err diff --git a/drivers/base/client.go b/drivers/base/client.go index 8bf8f421..538c43a6 100644 --- a/drivers/base/client.go +++ b/drivers/base/client.go @@ -6,6 +6,7 @@ import ( "time" "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/net" "github.com/go-resty/resty/v2" ) @@ -26,7 +27,7 @@ func InitClient() { NoRedirectClient.SetHeader("user-agent", UserAgent) RestyClient = NewRestyClient() - HttpClient = NewHttpClient() + HttpClient = net.NewHttpClient() } func NewRestyClient() *resty.Client { @@ -38,13 +39,3 @@ func NewRestyClient() *resty.Client { SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify}) return client } - -func NewHttpClient() *http.Client { - return &http.Client{ - Timeout: time.Hour * 48, - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify}, - }, - } -} diff --git a/drivers/chaoxing/driver.go b/drivers/chaoxing/driver.go index 9b526f8a..bf01a83b 100644 --- a/drivers/chaoxing/driver.go +++ b/drivers/chaoxing/driver.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/alist-org/alist/v3/internal/stream" "io" "mime/multipart" "net/http" @@ -249,13 +248,13 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr if err != nil { return err } - r := &stream.ReaderUpdatingProgress{ - Reader: &stream.SimpleReaderWithSize{ + r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: &driver.SimpleReaderWithSize{ Reader: body, Size: int64(body.Len()), }, UpdateProgress: up, - } + }) req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r) if err != nil { return err diff --git a/drivers/cloudreve/driver.go b/drivers/cloudreve/driver.go index 
8fc117ac..73fc3fea 100644 --- a/drivers/cloudreve/driver.go +++ b/drivers/cloudreve/driver.go @@ -1,7 +1,9 @@ package cloudreve import ( + "bytes" "context" + "errors" "io" "net/http" "path" @@ -173,7 +175,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File var n int buf = make([]byte, chunkSize) n, err = io.ReadAtLeast(stream, buf, chunkSize) - if err != nil && err != io.ErrUnexpectedEOF { + if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) { if err == io.EOF { return nil } @@ -186,7 +188,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) { req.SetHeader("Content-Type", "application/octet-stream") req.SetHeader("Content-Length", strconv.Itoa(n)) - req.SetBody(buf) + req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf))) }, nil) if err != nil { break diff --git a/drivers/cloudreve/util.go b/drivers/cloudreve/util.go index b5b71153..8a90a42f 100644 --- a/drivers/cloudreve/util.go +++ b/drivers/cloudreve/util.go @@ -100,7 +100,7 @@ func (d *Cloudreve) login() error { if err == nil { break } - if err != nil && err.Error() != "CAPTCHA not match." { + if err.Error() != "CAPTCHA not match." 
{ break } } @@ -202,7 +202,8 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U if err != nil { return err } - req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), bytes.NewBuffer(byteData)) + req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), + driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) if err != nil { return err } @@ -214,7 +215,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() up(float64(finish) * 100 / float64(stream.GetSize())) chunk++ } @@ -241,7 +242,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u if err != nil { return err } - req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData)) + req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) if err != nil { return err } @@ -256,10 +257,10 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 { data, _ := io.ReadAll(res.Body) - res.Body.Close() + _ = res.Body.Close() return errors.New(string(data)) } - res.Body.Close() + _ = res.Body.Close() up(float64(finish) * 100 / float64(stream.GetSize())) } // 上传成功发送回调请求 diff --git a/drivers/dropbox/driver.go b/drivers/dropbox/driver.go index 9b1717b0..fbaecc4a 100644 --- a/drivers/dropbox/driver.go +++ b/drivers/dropbox/driver.go @@ -191,7 +191,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt } url := d.contentBase + "/2/files/upload_session/append_v2" - reader := io.LimitReader(stream, PartSize) + reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize)) req, err := http.NewRequest(http.MethodPost, 
url, reader) if err != nil { log.Errorf("failed to update file when append to upload session, err: %+v", err) @@ -219,13 +219,8 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt return err } _ = res.Body.Close() - - if count > 0 { - up(float64(i+1) * 100 / float64(count)) - } - + up(float64(i+1) * 100 / float64(count)) offset += byteSize - } // 3.finish toPath := dstDir.GetPath() + "/" + stream.GetName() diff --git a/drivers/ftp/driver.go b/drivers/ftp/driver.go index b3e95f93..8f30b780 100644 --- a/drivers/ftp/driver.go +++ b/drivers/ftp/driver.go @@ -2,7 +2,6 @@ package ftp import ( "context" - "github.com/alist-org/alist/v3/internal/stream" stdpath "path" "github.com/alist-org/alist/v3/internal/driver" @@ -120,13 +119,10 @@ func (d *FTP) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, u return err } path := stdpath.Join(dstDir.GetPath(), s.GetName()) - return d.conn.Stor(encode(path, d.Encoding), &stream.ReaderWithCtx{ - Reader: &stream.ReaderUpdatingProgress{ - Reader: s, - UpdateProgress: up, - }, - Ctx: ctx, - }) + return d.conn.Stor(encode(path, d.Encoding), driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + })) } var _ driver.Driver = (*FTP)(nil) diff --git a/drivers/github/driver.go b/drivers/github/driver.go index dee4cbbf..d1cfd9fb 100644 --- a/drivers/github/driver.go +++ b/drivers/github/driver.go @@ -16,7 +16,6 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" log "github.com/sirupsen/logrus" @@ -676,13 +675,13 @@ func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.Up afterContentReader := strings.NewReader(afterContent) req, err := http.NewRequestWithContext(ctx, http.MethodPost, 
fmt.Sprintf("https://api.github.com/repos/%s/%s/git/blobs", d.Owner, d.Repo), - &stream.ReaderUpdatingProgress{ - Reader: &stream.SimpleReaderWithSize{ + driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: &driver.SimpleReaderWithSize{ Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader), Size: length, }, UpdateProgress: up, - }) + })) if err != nil { return "", err } @@ -698,6 +697,7 @@ func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.Up if err != nil { return "", err } + defer res.Body.Close() resBody, err := io.ReadAll(res.Body) if err != nil { return "", err diff --git a/drivers/google_drive/driver.go b/drivers/google_drive/driver.go index dccdcea9..c8afb084 100644 --- a/drivers/google_drive/driver.go +++ b/drivers/google_drive/driver.go @@ -158,7 +158,8 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi putUrl := res.Header().Get("location") if stream.GetSize() < d.ChunkSize*1024*1024 { _, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) { - req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream) + req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)). 
+ SetBody(driver.NewLimitedUploadStream(ctx, stream)) }, nil) } else { err = d.chunkUpload(ctx, stream, putUrl) diff --git a/drivers/google_drive/util.go b/drivers/google_drive/util.go index 0d380112..0fe54346 100644 --- a/drivers/google_drive/util.go +++ b/drivers/google_drive/util.go @@ -11,10 +11,10 @@ import ( "strconv" "time" - "github.com/alist-org/alist/v3/pkg/http_range" - "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" "github.com/golang-jwt/jwt/v4" @@ -126,8 +126,7 @@ func (d *GoogleDrive) refreshToken() error { } d.AccessToken = resp.AccessToken return nil - } - if gdsaFileErr != nil && os.IsExist(gdsaFileErr) { + } else if os.IsExist(gdsaFileErr) { return gdsaFileErr } url := "https://www.googleapis.com/oauth2/v4/token" @@ -229,6 +228,7 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer if err != nil { return err } + reader = driver.NewLimitedUploadStream(ctx, reader) _, err = d.request(url, http.MethodPut, func(req *resty.Request) { req.SetHeaders(map[string]string{ "Content-Length": strconv.FormatInt(chunkSize, 10), diff --git a/drivers/google_photo/driver.go b/drivers/google_photo/driver.go index b54132ef..e6f0abc6 100644 --- a/drivers/google_photo/driver.go +++ b/drivers/google_photo/driver.go @@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi } resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) { - req.SetBody(stream).SetContext(ctx) + req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx) }, nil, postHeaders) if err != nil { diff --git a/drivers/halalcloud/driver.go b/drivers/halalcloud/driver.go index d3235828..26832760 100644 --- a/drivers/halalcloud/driver.go +++ b/drivers/halalcloud/driver.go @@ -392,10 +392,11 
@@ func (d *HalalCloud) put(ctx context.Context, dstDir model.Obj, fileStream model if fileStream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { uploader.PartSize = fileStream.GetSize() / (s3manager.MaxUploadParts - 1) } + reader := driver.NewLimitedUploadStream(ctx, fileStream) _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{ Bucket: aws.String(result.Bucket), Key: aws.String(result.Key), - Body: io.TeeReader(fileStream, driver.NewProgress(fileStream.GetSize(), up)), + Body: io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up)), }) return nil, err diff --git a/drivers/ilanzou/driver.go b/drivers/ilanzou/driver.go index 22d1589f..697d85b1 100644 --- a/drivers/ilanzou/driver.go +++ b/drivers/ilanzou/driver.go @@ -309,13 +309,13 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame upToken := utils.Json.Get(res, "upToken").ToString() now := time.Now() key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli()) - reader := &stream.ReaderUpdatingProgress{ - Reader: &stream.SimpleReaderWithSize{ + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: &driver.SimpleReaderWithSize{ Reader: tempFile, Size: s.GetSize(), }, UpdateProgress: up, - } + }) var token string if s.GetSize() <= DefaultPartSize { res, err := d.upClient.R().SetContext(ctx).SetMultipartFormData(map[string]string{ diff --git a/drivers/ipfs_api/driver.go b/drivers/ipfs_api/driver.go index 61886b38..77760656 100644 --- a/drivers/ipfs_api/driver.go +++ b/drivers/ipfs_api/driver.go @@ -3,7 +3,6 @@ package ipfs import ( "context" "fmt" - "github.com/alist-org/alist/v3/internal/stream" "net/url" stdpath "path" "path/filepath" @@ -111,13 +110,10 @@ func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error { func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { // TODO upload file, 
optional - _, err := d.sh.Add(&stream.ReaderWithCtx{ - Reader: &stream.ReaderUpdatingProgress{ - Reader: s, - UpdateProgress: up, - }, - Ctx: ctx, - }, ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName()))) + _, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }), ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName()))) return err } diff --git a/drivers/kodbox/driver.go b/drivers/kodbox/driver.go index ff48ffb2..c536c916 100644 --- a/drivers/kodbox/driver.go +++ b/drivers/kodbox/driver.go @@ -3,9 +3,6 @@ package kodbox import ( "context" "fmt" - "github.com/alist-org/alist/v3/internal/stream" - "github.com/alist-org/alist/v3/pkg/utils" - "github.com/go-resty/resty/v2" "net/http" "path/filepath" "strings" @@ -13,6 +10,8 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/go-resty/resty/v2" ) type KodBox struct { @@ -229,10 +228,10 @@ func (d *KodBox) Remove(ctx context.Context, obj model.Obj) error { func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { var resp *CommonResp _, err := d.request(http.MethodPost, "/?explorer/upload/fileUpload", func(req *resty.Request) { - r := &stream.ReaderUpdatingProgress{ + r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: s, UpdateProgress: up, - } + }) req.SetFileReader("file", s.GetName(), r). SetResult(&resp). 
SetFormData(map[string]string{ diff --git a/drivers/lanzou/driver.go b/drivers/lanzou/driver.go index 90635d16..877e72bb 100644 --- a/drivers/lanzou/driver.go +++ b/drivers/lanzou/driver.go @@ -2,7 +2,6 @@ package lanzou import ( "context" - "github.com/alist-org/alist/v3/internal/stream" "net/http" "github.com/alist-org/alist/v3/drivers/base" @@ -213,6 +212,10 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer if d.IsCookie() || d.IsAccount() { var resp RespText[[]FileOrFolder] _, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) { + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) req.SetFormData(map[string]string{ "task": "1", "vie": "2", @@ -220,10 +223,7 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer "id": "WU_FILE_0", "name": s.GetName(), "folder_id_bb_n": dstDir.GetID(), - }).SetFileReader("upload_file", s.GetName(), &stream.ReaderUpdatingProgress{ - Reader: s, - UpdateProgress: up, - }).SetContext(ctx) + }).SetFileReader("upload_file", s.GetName(), reader).SetContext(ctx) }, &resp, true) if err != nil { return nil, err diff --git a/drivers/lark/driver.go b/drivers/lark/driver.go index d2672300..fbf7529a 100644 --- a/drivers/lark/driver.go +++ b/drivers/lark/driver.go @@ -320,7 +320,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea Build() // 发起请求 - uploadLimit.Wait(ctx) + err := uploadLimit.Wait(ctx) + if err != nil { + return nil, err + } resp, err := c.client.Drive.File.UploadPrepare(ctx, req) if err != nil { return nil, err @@ -341,7 +344,7 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea length = stream.GetSize() - int64(i*blockSize) } - reader := io.LimitReader(stream, length) + reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, length)) req := larkdrive.NewUploadPartFileReqBuilder(). 
Body(larkdrive.NewUploadPartFileReqBodyBuilder(). @@ -353,7 +356,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea Build() // 发起请求 - uploadLimit.Wait(ctx) + err = uploadLimit.Wait(ctx) + if err != nil { + return nil, err + } resp, err := c.client.Drive.File.UploadPart(ctx, req) if err != nil { diff --git a/drivers/mediatrack/driver.go b/drivers/mediatrack/driver.go index ed53f8ee..50ef9799 100644 --- a/drivers/mediatrack/driver.go +++ b/drivers/mediatrack/driver.go @@ -5,7 +5,6 @@ import ( "crypto/md5" "encoding/hex" "fmt" - "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" "strconv" @@ -195,13 +194,13 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileS input := &s3manager.UploadInput{ Bucket: &resp.Data.Bucket, Key: &resp.Data.Object, - Body: &stream.ReaderUpdatingProgress{ - Reader: &stream.SimpleReaderWithSize{ + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: &driver.SimpleReaderWithSize{ Reader: tempFile, Size: file.GetSize(), }, UpdateProgress: up, - }, + }), } _, err = uploader.UploadWithContext(ctx, input) if err != nil { diff --git a/drivers/mega/driver.go b/drivers/mega/driver.go index 198c1f98..f76bfeef 100644 --- a/drivers/mega/driver.go +++ b/drivers/mega/driver.go @@ -156,6 +156,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea return err } + reader := driver.NewLimitedUploadStream(ctx, stream) for id := 0; id < u.Chunks(); id++ { if utils.IsCanceled(ctx) { return ctx.Err() @@ -165,7 +166,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea return err } chunk := make([]byte, chkSize) - n, err := io.ReadFull(stream, chunk) + n, err := io.ReadFull(reader, chunk) if err != nil && err != io.EOF { return err } diff --git a/drivers/misskey/driver.go b/drivers/misskey/driver.go index 29797a01..b5c753f3 100644 --- a/drivers/misskey/driver.go +++ b/drivers/misskey/driver.go @@ 
-64,7 +64,7 @@ func (d *Misskey) Remove(ctx context.Context, obj model.Obj) error { } func (d *Misskey) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { - return d.put(dstDir, stream, up) + return d.put(ctx, dstDir, stream, up) } //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { diff --git a/drivers/misskey/util.go b/drivers/misskey/util.go index 4d5a3b4d..f8baeafa 100644 --- a/drivers/misskey/util.go +++ b/drivers/misskey/util.go @@ -1,7 +1,6 @@ package misskey import ( - "bytes" "context" "errors" "io" @@ -190,16 +189,16 @@ func (d *Misskey) remove(obj model.Obj) error { } } -func (d *Misskey) put(dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { +func (d *Misskey) put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { var file MFile - fileContent, err := io.ReadAll(stream) - if err != nil { - return nil, err - } - + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: stream, + UpdateProgress: up, + }) req := base.RestyClient.R(). - SetFileReader("file", stream.GetName(), io.NopCloser(bytes.NewReader(fileContent))). + SetContext(ctx). + SetFileReader("file", stream.GetName(), reader). SetFormData(map[string]string{ "folderId": handleFolderId(dstDir).(string), "name": stream.GetName(), @@ -207,7 +206,8 @@ func (d *Misskey) put(dstDir model.Obj, stream model.FileStreamer, up driver.Upd "isSensitive": "false", "force": "false", }). - SetResult(&file).SetAuthToken(d.AccessToken) + SetResult(&file). 
+ SetAuthToken(d.AccessToken) resp, err := req.Post(d.Endpoint + "/api/drive/files/create") if err != nil { diff --git a/drivers/mopan/driver.go b/drivers/mopan/driver.go index 369ec83b..2cbabe46 100644 --- a/drivers/mopan/driver.go +++ b/drivers/mopan/driver.go @@ -10,6 +10,8 @@ import ( "strings" "time" + "golang.org/x/sync/semaphore" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" @@ -301,6 +303,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre retry.Attempts(3), retry.Delay(time.Second), retry.DelayType(retry.BackOffDelay)) + sem := semaphore.NewWeighted(3) // step.3 parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfos) @@ -312,6 +315,9 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre if utils.IsCanceled(upCtx) { break } + if err = sem.Acquire(ctx, 1); err != nil { + break + } i, part, byteSize := i, part, initUpdload.PartSize if part.PartNumber == uploadPartData.PartTotal { byteSize = initUpdload.LastPartSize @@ -319,7 +325,9 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre // step.4 threadG.Go(func(ctx context.Context) error { - req, err := part.NewRequest(ctx, io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize)) + defer sem.Release(1) + reader := io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize) + req, err := part.NewRequest(ctx, driver.NewLimitedUploadStream(ctx, reader)) if err != nil { return err } @@ -328,7 +336,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre if err != nil { return err } - resp.Body.Close() + _ = resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("upload err,code=%d", resp.StatusCode) } diff --git a/drivers/netease_music/types.go b/drivers/netease_music/types.go index 
332f75e9..12afeb7a 100644 --- a/drivers/netease_music/types.go +++ b/drivers/netease_music/types.go @@ -116,16 +116,3 @@ func (ch *Characteristic) merge(data map[string]string) map[string]interface{} { } return body } - -type InlineReadCloser struct { - io.Reader - io.Closer -} - -func (rc *InlineReadCloser) Read(p []byte) (int, error) { - return rc.Reader.Read(p) -} - -func (rc *InlineReadCloser) Close() error { - return rc.Closer.Close() -} diff --git a/drivers/netease_music/util.go b/drivers/netease_music/util.go index 25efde77..2e78be14 100644 --- a/drivers/netease_music/util.go +++ b/drivers/netease_music/util.go @@ -2,8 +2,6 @@ package netease_music import ( "context" - "github.com/alist-org/alist/v3/internal/driver" - "github.com/alist-org/alist/v3/internal/stream" "net/http" "path" "regexp" @@ -12,6 +10,7 @@ import ( "time" "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" @@ -69,13 +68,10 @@ func (d *NeteaseMusic) request(url, method string, opt ReqOption) ([]byte, error opt.up = func(_ float64) {} } req.SetContentLength(true) - req.SetBody(&InlineReadCloser{ - Reader: &stream.ReaderUpdatingProgress{ - Reader: opt.stream, - UpdateProgress: opt.up, - }, - Closer: opt.stream, - }) + req.SetBody(driver.NewLimitedUploadStream(opt.ctx, &driver.ReaderUpdatingProgress{ + Reader: opt.stream, + UpdateProgress: opt.up, + })) } else { req.SetFormData(data) } diff --git a/drivers/onedrive/util.go b/drivers/onedrive/util.go index 95f92db6..9350a681 100644 --- a/drivers/onedrive/util.go +++ b/drivers/onedrive/util.go @@ -152,12 +152,8 @@ func (d *Onedrive) upSmall(ctx context.Context, dstDir model.Obj, stream model.F // 1. 
upload new file // ApiDoc: https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content?view=odsp-graph-online url := d.GetMetaUrl(false, filepath) + "/content" - data, err := io.ReadAll(stream) - if err != nil { - return err - } - _, err = d.Request(url, http.MethodPut, func(req *resty.Request) { - req.SetBody(data).SetContext(ctx) + _, err := d.Request(url, http.MethodPut, func(req *resty.Request) { + req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx) }, nil) if err != nil { return fmt.Errorf("onedrive: Failed to upload new file(path=%v): %w", filepath, err) @@ -225,7 +221,7 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil if err != nil { return err } - req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData)) + req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) if err != nil { return err } diff --git a/drivers/onedrive_app/util.go b/drivers/onedrive_app/util.go index d036e131..a6793520 100644 --- a/drivers/onedrive_app/util.go +++ b/drivers/onedrive_app/util.go @@ -140,12 +140,8 @@ func (d *OnedriveAPP) GetFile(path string) (*File, error) { func (d *OnedriveAPP) upSmall(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) error { url := d.GetMetaUrl(false, stdpath.Join(dstDir.GetPath(), stream.GetName())) + "/content" - data, err := io.ReadAll(stream) - if err != nil { - return err - } - _, err = d.Request(url, http.MethodPut, func(req *resty.Request) { - req.SetBody(data).SetContext(ctx) + _, err := d.Request(url, http.MethodPut, func(req *resty.Request) { + req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx) }, nil) return err } @@ -175,7 +171,7 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model. 
if err != nil { return err } - req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData)) + req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) if err != nil { return err } diff --git a/drivers/pikpak/util.go b/drivers/pikpak/util.go index eb96a42a..f2594e78 100644 --- a/drivers/pikpak/util.go +++ b/drivers/pikpak/util.go @@ -10,7 +10,6 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" - "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/aliyun/aliyun-oss-go-sdk/oss" jsoniter "github.com/json-iterator/go" @@ -430,13 +429,10 @@ func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.File return err } - err = bucket.PutObject(params.Key, &stream.ReaderWithCtx{ - Reader: &stream.ReaderUpdatingProgress{ - Reader: s, - UpdateProgress: up, - }, - Ctx: ctx, - }, OssOption(params)...) + err = bucket.PutObject(params.Key, driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }), OssOption(params)...) 
if err != nil { return err } @@ -522,11 +518,8 @@ func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSi continue } - b := bytes.NewBuffer(buf) - if part, err = bucket.UploadPart(imur, &stream.ReaderWithCtx{ - Reader: b, - Ctx: ctx, - }, chunk.Size, chunk.Number, OssOption(params)...); err == nil { + b := driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf)) + if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, OssOption(params)...); err == nil { break } } diff --git a/drivers/quark_uc/driver.go b/drivers/quark_uc/driver.go index 8674fbab..04757b1b 100644 --- a/drivers/quark_uc/driver.go +++ b/drivers/quark_uc/driver.go @@ -1,6 +1,7 @@ package quark import ( + "bytes" "context" "crypto/md5" "crypto/sha1" @@ -178,7 +179,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File } // part up partSize := pre.Metadata.PartSize - var bytes []byte + var part []byte md5s := make([]string, 0) defaultBytes := make([]byte, partSize) total := stream.GetSize() @@ -189,17 +190,18 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File return ctx.Err() } if left > int64(partSize) { - bytes = defaultBytes + part = defaultBytes } else { - bytes = make([]byte, left) + part = make([]byte, left) } - _, err := io.ReadFull(tempFile, bytes) + _, err := io.ReadFull(tempFile, part) if err != nil { return err } - left -= int64(len(bytes)) + left -= int64(len(part)) log.Debugf("left: %d", left) - m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, bytes) + reader := driver.NewLimitedUploadStream(ctx, bytes.NewReader(part)) + m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, reader) //m, err := driver.UpPart(pre, file.GetMIMEType(), partNumber, bytes, account, md5Str, sha1Str) if err != nil { return err diff --git a/drivers/quark_uc/util.go b/drivers/quark_uc/util.go index df27af67..9a3bdc1c 100644 --- a/drivers/quark_uc/util.go +++ b/drivers/quark_uc/util.go @@ -6,6 
+6,7 @@ import ( "encoding/base64" "errors" "fmt" + "io" "net/http" "strconv" "strings" @@ -119,7 +120,7 @@ func (d *QuarkOrUC) upHash(md5, sha1, taskId string) (bool, error) { return resp.Data.Finish, err } -func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes []byte) (string, error) { +func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes io.Reader) (string, error) { //func (driver QuarkOrUC) UpPart(pre UpPreResp, mineType string, partNumber int, bytes []byte, account *model.Account, md5Str, sha1Str string) (string, error) { timeStr := time.Now().UTC().Format(http.TimeFormat) data := base.Json{ @@ -163,6 +164,9 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit "partNumber": strconv.Itoa(partNumber), "uploadId": pre.Data.UploadId, }).SetBody(bytes).Put(u) + if err != nil { + return "", err + } if res.StatusCode() != 200 { return "", fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String()) } @@ -230,6 +234,9 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit SetQueryParams(map[string]string{ "uploadId": pre.Data.UploadId, }).SetBody(body).Post(u) + if err != nil { + return err + } if res.StatusCode() != 200 { return fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String()) } diff --git a/drivers/quqi/driver.go b/drivers/quqi/driver.go index 2ab972ca..0fa64041 100644 --- a/drivers/quqi/driver.go +++ b/drivers/quqi/driver.go @@ -12,7 +12,6 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" - istream "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils/random" "github.com/aws/aws-sdk-go/aws" @@ -387,8 +386,8 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea } uploader := 
s3manager.NewUploader(s) buf := make([]byte, 1024*1024*2) - fup := &istream.ReaderUpdatingProgress{ - Reader: &istream.SimpleReaderWithSize{ + fup := &driver.ReaderUpdatingProgress{ + Reader: &driver.SimpleReaderWithSize{ Reader: f, Size: int64(len(buf)), }, @@ -402,12 +401,19 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea } return nil, err } + reader := bytes.NewReader(buf[:n]) _, err = uploader.S3.UploadPartWithContext(ctx, &s3.UploadPartInput{ UploadId: &uploadInitResp.Data.UploadID, Key: &uploadInitResp.Data.Key, Bucket: &uploadInitResp.Data.Bucket, PartNumber: aws.Int64(partNumber), - Body: bytes.NewReader(buf[:n]), + Body: struct { + *driver.RateLimitReader + io.Seeker + }{ + RateLimitReader: driver.NewLimitedUploadStream(ctx, reader), + Seeker: reader, + }, }) if err != nil { return nil, err diff --git a/drivers/s3/driver.go b/drivers/s3/driver.go index a7e924e2..b7411489 100644 --- a/drivers/s3/driver.go +++ b/drivers/s3/driver.go @@ -4,18 +4,17 @@ import ( "bytes" "context" "fmt" - "github.com/alist-org/alist/v3/server/common" "io" "net/url" stdpath "path" "strings" "time" - "github.com/alist-org/alist/v3/internal/stream" - "github.com/alist-org/alist/v3/pkg/cron" - "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/pkg/cron" + "github.com/alist-org/alist/v3/server/common" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" @@ -174,10 +173,10 @@ func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up input := &s3manager.UploadInput{ Bucket: &d.Bucket, Key: &key, - Body: &stream.ReaderUpdatingProgress{ + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: s, UpdateProgress: up, - }, + }), ContentType: &contentType, } _, err := uploader.UploadWithContext(ctx, input) 
diff --git a/drivers/seafile/driver.go b/drivers/seafile/driver.go index f23038d1..239f57dd 100644 --- a/drivers/seafile/driver.go +++ b/drivers/seafile/driver.go @@ -3,7 +3,6 @@ package seafile import ( "context" "fmt" - "github.com/alist-org/alist/v3/internal/stream" "net/http" "strings" "time" @@ -215,10 +214,10 @@ func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame u := string(res) u = u[1 : len(u)-1] // remove quotes _, err = d.request(http.MethodPost, u, func(req *resty.Request) { - r := &stream.ReaderUpdatingProgress{ + r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: s, UpdateProgress: up, - } + }) req.SetFileReader("file", s.GetName(), r). SetFormData(map[string]string{ "parent_dir": path, diff --git a/drivers/sftp/driver.go b/drivers/sftp/driver.go index 1f216598..7498ce39 100644 --- a/drivers/sftp/driver.go +++ b/drivers/sftp/driver.go @@ -111,7 +111,7 @@ func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea defer func() { _ = dstFile.Close() }() - err = utils.CopyWithCtx(ctx, dstFile, stream, stream.GetSize(), up) + err = utils.CopyWithCtx(ctx, dstFile, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up) return err } diff --git a/drivers/smb/driver.go b/drivers/smb/driver.go index 9632f24e..c292e92e 100644 --- a/drivers/smb/driver.go +++ b/drivers/smb/driver.go @@ -186,7 +186,7 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream _ = d.fs.Remove(fullPath) } }() - err = utils.CopyWithCtx(ctx, out, stream, stream.GetSize(), up) + err = utils.CopyWithCtx(ctx, out, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up) if err != nil { return err } diff --git a/drivers/teambition/driver.go b/drivers/teambition/driver.go index c75d2ac0..b37c324b 100644 --- a/drivers/teambition/driver.go +++ b/drivers/teambition/driver.go @@ -148,7 +148,7 @@ func (d *Teambition) Put(ctx context.Context, dstDir model.Obj, stream 
model.Fil var newFile *FileUpload if stream.GetSize() <= 20971520 { // post upload - newFile, err = d.upload(ctx, stream, token) + newFile, err = d.upload(ctx, stream, token, up) } else { // chunk upload //err = base.ErrNotImplement diff --git a/drivers/teambition/util.go b/drivers/teambition/util.go index 181cc58f..01c12cb1 100644 --- a/drivers/teambition/util.go +++ b/drivers/teambition/util.go @@ -1,6 +1,7 @@ package teambition import ( + "bytes" "context" "errors" "fmt" @@ -120,11 +121,15 @@ func (d *Teambition) getFiles(parentId string) ([]model.Obj, error) { return files, nil } -func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string) (*FileUpload, error) { +func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string, up driver.UpdateProgress) (*FileUpload, error) { prefix := "tcs" if d.isInternational() { prefix = "us-tcs" } + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: file, + UpdateProgress: up, + }) var newFile FileUpload res, err := base.RestyClient.R(). SetContext(ctx). @@ -134,7 +139,8 @@ func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token "type": file.GetMimetype(), "size": strconv.FormatInt(file.GetSize(), 10), "lastModifiedDate": time.Now().Format("Mon Jan 02 2006 15:04:05 GMT+0800 (中国标准时间)"), - }).SetMultipartField("file", file.GetName(), file.GetMimetype(), file). + }). + SetMultipartField("file", file.GetName(), file.GetMimetype(), reader). Post(fmt.Sprintf("https://%s.teambition.net/upload", prefix)) if err != nil { return nil, err @@ -183,10 +189,9 @@ func (d *Teambition) chunkUpload(ctx context.Context, file model.FileStreamer, t "Authorization": token, "Content-Type": "application/octet-stream", "Referer": referer, - }).SetBody(chunkData).Post(u) - if err != nil { - return nil, err - } + }). + SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(chunkData))). 
+ Post(u) if err != nil { return nil, err } @@ -252,7 +257,10 @@ func (d *Teambition) newUpload(ctx context.Context, dstDir model.Obj, stream mod Key: &uploadToken.Upload.Key, ContentDisposition: &uploadToken.Upload.ContentDisposition, ContentType: &uploadToken.Upload.ContentType, - Body: stream, + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: stream, + UpdateProgress: up, + }), } _, err = uploader.UploadWithContext(ctx, input) if err != nil { diff --git a/drivers/terabox/driver.go b/drivers/terabox/driver.go index 362de69e..82962b81 100644 --- a/drivers/terabox/driver.go +++ b/drivers/terabox/driver.go @@ -228,7 +228,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt res, err := base.RestyClient.R(). SetContext(ctx). SetQueryParams(params). - SetFileReader("file", stream.GetName(), bytes.NewReader(byteData)). + SetFileReader("file", stream.GetName(), driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))). SetHeader("Cookie", d.Cookie). 
Post(u) if err != nil { diff --git a/drivers/thunder/driver.go b/drivers/thunder/driver.go index 1b7f0af6..7f41d003 100644 --- a/drivers/thunder/driver.go +++ b/drivers/thunder/driver.go @@ -3,7 +3,6 @@ package thunder import ( "context" "fmt" - "github.com/alist-org/alist/v3/internal/stream" "net/http" "strconv" "strings" @@ -383,10 +382,10 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.Fi Bucket: aws.String(param.Bucket), Key: aws.String(param.Key), Expires: aws.Time(param.Expiration), - Body: &stream.ReaderUpdatingProgress{ + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: file, UpdateProgress: up, - }, + }), }) return err } diff --git a/drivers/thunder_browser/driver.go b/drivers/thunder_browser/driver.go index 96dd7e8e..7ce71f7d 100644 --- a/drivers/thunder_browser/driver.go +++ b/drivers/thunder_browser/driver.go @@ -508,7 +508,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream Bucket: aws.String(param.Bucket), Key: aws.String(param.Key), Expires: aws.Time(param.Expiration), - Body: io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up)), + Body: driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up))), }) return err } diff --git a/drivers/thunderx/driver.go b/drivers/thunderx/driver.go index 93e07ca9..2194bdc6 100644 --- a/drivers/thunderx/driver.go +++ b/drivers/thunderx/driver.go @@ -8,7 +8,6 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" - "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash" "github.com/aws/aws-sdk-go/aws" @@ -414,10 +413,10 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.F Bucket: aws.String(param.Bucket), Key: aws.String(param.Key), Expires: 
aws.Time(param.Expiration), - Body: &stream.ReaderUpdatingProgress{ + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: file, UpdateProgress: up, - }, + }), }) return err } diff --git a/drivers/trainbit/driver.go b/drivers/trainbit/driver.go index 2b1815ed..f4f4bf3f 100644 --- a/drivers/trainbit/driver.go +++ b/drivers/trainbit/driver.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "github.com/alist-org/alist/v3/internal/stream" "io" "net/http" "net/url" @@ -59,7 +58,7 @@ func (d *Trainbit) List(ctx context.Context, dir model.Obj, args model.ListArgs) return nil, err } var jsonData any - json.Unmarshal(data, &jsonData) + err = json.Unmarshal(data, &jsonData) if err != nil { return nil, err } @@ -122,10 +121,10 @@ func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, s model.FileStream query.Add("guid", guid) query.Add("name", url.QueryEscape(local2provider(s.GetName(), false)+".")) endpoint.RawQuery = query.Encode() - progressReader := &stream.ReaderUpdatingProgress{ + progressReader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: s, UpdateProgress: up, - } + }) req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint.String(), progressReader) if err != nil { return err diff --git a/drivers/url_tree/driver.go b/drivers/url_tree/driver.go index 569b3fba..f97d5cc5 100644 --- a/drivers/url_tree/driver.go +++ b/drivers/url_tree/driver.go @@ -3,7 +3,6 @@ package url_tree import ( "context" "errors" - "github.com/alist-org/alist/v3/internal/op" stdpath "path" "strings" "sync" @@ -11,6 +10,7 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/utils" log "github.com/sirupsen/logrus" ) diff --git a/drivers/uss/driver.go b/drivers/uss/driver.go index 3c54797c..2e219050 100644 --- 
a/drivers/uss/driver.go +++ b/drivers/uss/driver.go @@ -126,13 +126,10 @@ func (d *USS) Remove(ctx context.Context, obj model.Obj) error { func (d *USS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { return d.client.Put(&upyun.PutObjectConfig{ Path: getKey(path.Join(dstDir.GetPath(), s.GetName()), false), - Reader: &stream.ReaderWithCtx{ - Reader: &stream.ReaderUpdatingProgress{ - Reader: s, - UpdateProgress: up, - }, - Ctx: ctx, - }, + Reader: driver.NewLimitedUploadStream(ctx, &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }), }) } diff --git a/drivers/vtencent/util.go b/drivers/vtencent/util.go index ba87f1ab..91db54b7 100644 --- a/drivers/vtencent/util.go +++ b/drivers/vtencent/util.go @@ -278,7 +278,8 @@ func (d *Vtencent) FileUpload(ctx context.Context, dstDir model.Obj, stream mode input := &s3manager.UploadInput{ Bucket: aws.String(fmt.Sprintf("%s-%d", params.StorageBucket, params.StorageAppID)), Key: ¶ms.Video.StoragePath, - Body: io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up))), + Body: driver.NewLimitedUploadStream(ctx, + io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up)))), } _, err = uploader.UploadWithContext(ctx, input) if err != nil { diff --git a/drivers/webdav/driver.go b/drivers/webdav/driver.go index 35240c49..45150fca 100644 --- a/drivers/webdav/driver.go +++ b/drivers/webdav/driver.go @@ -2,7 +2,6 @@ package webdav import ( "context" - "github.com/alist-org/alist/v3/internal/stream" "net/http" "os" "path" @@ -99,13 +98,11 @@ func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer r.Header.Set("Content-Type", s.GetMimetype()) r.ContentLength = s.GetSize() } - err := d.client.WriteStream(path.Join(dstDir.GetPath(), s.GetName()), &stream.ReaderWithCtx{ - Reader: &stream.ReaderUpdatingProgress{ - Reader: s, - UpdateProgress: up, - }, - Ctx: ctx, - }, 0644, callback) + reader := 
driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) + err := d.client.WriteStream(path.Join(dstDir.GetPath(), s.GetName()), reader, 0644, callback) return err } diff --git a/drivers/weiyun/driver.go b/drivers/weiyun/driver.go index 59bd7237..90793d33 100644 --- a/drivers/weiyun/driver.go +++ b/drivers/weiyun/driver.go @@ -70,7 +70,7 @@ func (d *WeiYun) Init(ctx context.Context) error { if d.client.LoginType() == 1 { d.cron = cron.NewCron(time.Minute * 5) d.cron.Do(func() { - d.client.KeepAlive() + _ = d.client.KeepAlive() }) } @@ -364,12 +364,13 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr threadG.Go(func(ctx context.Context) error { for { channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len))) + len64 := int64(channel.Len) upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData, - io.NewSectionReader(file, channel.Offset, int64(channel.Len))) + driver.NewLimitedUploadStream(ctx, io.NewSectionReader(file, channel.Offset, len64))) if err != nil { return err } - cur := total.Add(int64(channel.Len)) + cur := total.Add(len64) up(float64(cur) * 100.0 / float64(stream.GetSize())) // 上传完成 if upData.UploadState != 1 { diff --git a/drivers/wopan/driver.go b/drivers/wopan/driver.go index 86093fc1..82ec05a9 100644 --- a/drivers/wopan/driver.go +++ b/drivers/wopan/driver.go @@ -155,7 +155,7 @@ func (d *Wopan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre _, err := d.client.Upload2C(d.getSpaceType(), wopan.Upload2CFile{ Name: stream.GetName(), Size: stream.GetSize(), - Content: stream, + Content: driver.NewLimitedUploadStream(ctx, stream), ContentType: stream.GetMimetype(), }, dstDir.GetID(), d.FamilyID, wopan.Upload2COption{ OnProgress: func(current, total int64) { diff --git a/drivers/yandex_disk/driver.go b/drivers/yandex_disk/driver.go index fe858519..6e5ca05c 100644 --- a/drivers/yandex_disk/driver.go +++ 
b/drivers/yandex_disk/driver.go @@ -2,7 +2,6 @@ package yandex_disk import ( "context" - "github.com/alist-org/alist/v3/internal/stream" "net/http" "path" "strconv" @@ -118,10 +117,11 @@ func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, s model.FileStre if err != nil { return err } - req, err := http.NewRequestWithContext(ctx, resp.Method, resp.Href, &stream.ReaderUpdatingProgress{ + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: s, UpdateProgress: up, }) + req, err := http.NewRequestWithContext(ctx, resp.Method, resp.Href, reader) if err != nil { return err } diff --git a/go.mod b/go.mod index 2bf4ba3e..7bf8a4bb 100644 --- a/go.mod +++ b/go.mod @@ -62,7 +62,7 @@ require ( github.com/u2takey/ffmpeg-go v0.5.0 github.com/upyun/go-sdk/v3 v3.0.4 github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 - github.com/xhofe/tache v0.1.3 + github.com/xhofe/tache v0.1.5 github.com/xhofe/wopan-sdk-go v0.1.3 github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9 github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22 @@ -102,6 +102,7 @@ require ( github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/text v0.2.0 // indirect + github.com/matoous/go-nanoid/v2 v2.1.0 // indirect github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect github.com/sorairolake/lzip-go v0.3.5 // indirect github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect @@ -170,7 +171,6 @@ require ( github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/pgx/v5 v5.5.5 // indirect - github.com/jaevor/go-nanoid v1.3.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -240,7 +240,7 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect 
go.etcd.io/bbolt v1.3.8 // indirect golang.org/x/arch v0.8.0 // indirect - golang.org/x/sync v0.10.0 // indirect + golang.org/x/sync v0.10.0 golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 diff --git a/go.sum b/go.sum index db58dea2..a51e0c6a 100644 --- a/go.sum +++ b/go.sum @@ -337,8 +337,6 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw= github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jaevor/go-nanoid v1.3.0 h1:nD+iepesZS6pr3uOVf20vR9GdGgJW1HPaR46gtrxzkg= -github.com/jaevor/go-nanoid v1.3.0/go.mod h1:SI+jFaPuddYkqkVQoNGHs81navCtH388TcrH0RqFKgY= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= @@ -403,6 +401,8 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= +github.com/matoous/go-nanoid/v2 v2.1.0 h1:P64+dmq21hhWdtvZfEAofnvJULaRR1Yib0+PnU669bE= +github.com/matoous/go-nanoid/v2 v2.1.0/go.mod h1:KlbGNQ+FhrUNIHUxZdL63t7tl4LaPkZNpUULS8H4uVM= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -596,8 +596,8 @@ github.com/x448/float16 v0.8.4 
h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0= -github.com/xhofe/tache v0.1.3 h1:MipxzlljYX29E1YI/SLC7hVomVF+51iP1OUzlsuq1wE= -github.com/xhofe/tache v0.1.3/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ= +github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM= +github.com/xhofe/tache v0.1.5/go.mod h1:PYt6I/XUKliSg1uHlgsk6ha+le/f6PAvjUtFZAVl3a8= github.com/xhofe/wopan-sdk-go v0.1.3 h1:J58X6v+n25ewBZjb05pKOr7AWGohb+Rdll4CThGh6+A= github.com/xhofe/wopan-sdk-go v0.1.3/go.mod h1:dcY9yA28fnaoZPnXZiVTFSkcd7GnIPTpTIIlfSI5z5Q= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= diff --git a/internal/bootstrap/data/setting.go b/internal/bootstrap/data/setting.go index 5e8a2be4..de3b8af9 100644 --- a/internal/bootstrap/data/setting.go +++ b/internal/bootstrap/data/setting.go @@ -11,6 +11,7 @@ import ( "github.com/alist-org/alist/v3/pkg/utils/random" "github.com/pkg/errors" "gorm.io/gorm" + "strconv" ) var initialSettingItems []model.SettingItem @@ -191,12 +192,12 @@ func InitialSettings() []model.SettingItem { {Key: conf.LdapDefaultPermission, Value: "0", Type: conf.TypeNumber, Group: model.LDAP, Flag: model.PRIVATE}, {Key: conf.LdapLoginTips, Value: "login with ldap", Type: conf.TypeString, Group: model.LDAP, Flag: model.PUBLIC}, - //s3 settings + // s3 settings {Key: conf.S3AccessKeyId, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE}, {Key: conf.S3SecretAccessKey, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE}, {Key: conf.S3Buckets, Value: "[]", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE}, - //ftp settings + // ftp settings {Key: 
conf.FTPPublicHost, Value: "127.0.0.1", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE}, {Key: conf.FTPPasvPortMap, Value: "", Type: conf.TypeText, Group: model.FTP, Flag: model.PRIVATE}, {Key: conf.FTPProxyUserAgent, Value: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " + @@ -205,6 +206,18 @@ func InitialSettings() []model.SettingItem { {Key: conf.FTPImplicitTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE}, {Key: conf.FTPTLSPrivateKeyPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE}, {Key: conf.FTPTLSPublicCertPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE}, + + // traffic settings + {Key: conf.TaskOfflineDownloadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Download.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.TaskOfflineDownloadTransferThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Transfer.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.TaskUploadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Upload.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.TaskCopyThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Copy.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.TaskDecompressDownloadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Decompress.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.TaskDecompressUploadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.DecompressUpload.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.StreamMaxClientDownloadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.StreamMaxClientUploadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: 
conf.StreamMaxServerDownloadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.StreamMaxServerUploadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, } initialSettingItems = append(initialSettingItems, tool.Tools.Items()...) if flags.Dev { diff --git a/internal/bootstrap/stream_limit.go b/internal/bootstrap/stream_limit.go new file mode 100644 index 00000000..5ece71e4 --- /dev/null +++ b/internal/bootstrap/stream_limit.go @@ -0,0 +1,53 @@ +package bootstrap + +import ( + "context" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/internal/stream" + "golang.org/x/time/rate" +) + +type blockBurstLimiter struct { + *rate.Limiter +} + +func (l blockBurstLimiter) WaitN(ctx context.Context, total int) error { + for total > 0 { + n := l.Burst() + if l.Limiter.Limit() == rate.Inf || n > total { + n = total + } + err := l.Limiter.WaitN(ctx, n) + if err != nil { + return err + } + total -= n + } + return nil +} + +func streamFilterNegative(limit int) (rate.Limit, int) { + if limit < 0 { + return rate.Inf, 0 + } + return rate.Limit(limit) * 1024.0, limit * 1024 +} + +func initLimiter(limiter *stream.Limiter, s string) { + clientDownLimit, burst := streamFilterNegative(setting.GetInt(s, -1)) + *limiter = blockBurstLimiter{Limiter: rate.NewLimiter(clientDownLimit, burst)} + op.RegisterSettingChangingCallback(func() { + newLimit, newBurst := streamFilterNegative(setting.GetInt(s, -1)) + (*limiter).SetLimit(newLimit) + (*limiter).SetBurst(newBurst) + }) +} + +func InitStreamLimit() { + initLimiter(&stream.ClientDownloadLimit, conf.StreamMaxClientDownloadSpeed) + initLimiter(&stream.ClientUploadLimit, conf.StreamMaxClientUploadSpeed) + initLimiter(&stream.ServerDownloadLimit, conf.StreamMaxServerDownloadSpeed) + initLimiter(&stream.ServerUploadLimit, 
conf.StreamMaxServerUploadSpeed) +} diff --git a/internal/bootstrap/task.go b/internal/bootstrap/task.go index 9c30c392..c67e3029 100644 --- a/internal/bootstrap/task.go +++ b/internal/bootstrap/task.go @@ -5,17 +5,44 @@ import ( "github.com/alist-org/alist/v3/internal/db" "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/offline_download/tool" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" "github.com/xhofe/tache" ) +func taskFilterNegative(num int) int64 { + if num < 0 { + num = 0 + } + return int64(num) +} + func InitTaskManager() { - fs.UploadTaskManager = tache.NewManager[*fs.UploadTask](tache.WithWorks(conf.Conf.Tasks.Upload.Workers), tache.WithMaxRetry(conf.Conf.Tasks.Upload.MaxRetry)) //upload will not support persist - fs.CopyTaskManager = tache.NewManager[*fs.CopyTask](tache.WithWorks(conf.Conf.Tasks.Copy.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant), db.UpdateTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Copy.MaxRetry)) - tool.DownloadTaskManager = tache.NewManager[*tool.DownloadTask](tache.WithWorks(conf.Conf.Tasks.Download.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant), db.UpdateTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Download.MaxRetry)) - tool.TransferTaskManager = tache.NewManager[*tool.TransferTask](tache.WithWorks(conf.Conf.Tasks.Transfer.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant), db.UpdateTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Transfer.MaxRetry)) + fs.UploadTaskManager = tache.NewManager[*fs.UploadTask](tache.WithWorks(setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers)), 
tache.WithMaxRetry(conf.Conf.Tasks.Upload.MaxRetry)) //upload will not support persist + op.RegisterSettingChangingCallback(func() { + fs.UploadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers))) + }) + fs.CopyTaskManager = tache.NewManager[*fs.CopyTask](tache.WithWorks(setting.GetInt(conf.TaskCopyThreadsNum, conf.Conf.Tasks.Copy.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant), db.UpdateTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Copy.MaxRetry)) + op.RegisterSettingChangingCallback(func() { + fs.CopyTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskCopyThreadsNum, conf.Conf.Tasks.Copy.Workers))) + }) + tool.DownloadTaskManager = tache.NewManager[*tool.DownloadTask](tache.WithWorks(setting.GetInt(conf.TaskOfflineDownloadThreadsNum, conf.Conf.Tasks.Download.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant), db.UpdateTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Download.MaxRetry)) + op.RegisterSettingChangingCallback(func() { + tool.DownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskOfflineDownloadThreadsNum, conf.Conf.Tasks.Download.Workers))) + }) + tool.TransferTaskManager = tache.NewManager[*tool.TransferTask](tache.WithWorks(setting.GetInt(conf.TaskOfflineDownloadTransferThreadsNum, conf.Conf.Tasks.Transfer.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant), db.UpdateTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Transfer.MaxRetry)) + op.RegisterSettingChangingCallback(func() { + tool.TransferTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskOfflineDownloadTransferThreadsNum, 
conf.Conf.Tasks.Transfer.Workers))) + }) if len(tool.TransferTaskManager.GetAll()) == 0 { //prevent offline downloaded files from being deleted CleanTempDir() } - fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](tache.WithWorks(conf.Conf.Tasks.Decompress.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant), db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry)) - fs.ArchiveContentUploadTaskManager.Manager = tache.NewManager[*fs.ArchiveContentUploadTask](tache.WithWorks(conf.Conf.Tasks.DecompressUpload.Workers), tache.WithMaxRetry(conf.Conf.Tasks.DecompressUpload.MaxRetry)) //decompress upload will not support persist + fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](tache.WithWorks(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant), db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry)) + op.RegisterSettingChangingCallback(func() { + fs.ArchiveDownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers))) + }) + fs.ArchiveContentUploadTaskManager.Manager = tache.NewManager[*fs.ArchiveContentUploadTask](tache.WithWorks(setting.GetInt(conf.TaskDecompressUploadThreadsNum, conf.Conf.Tasks.DecompressUpload.Workers)), tache.WithMaxRetry(conf.Conf.Tasks.DecompressUpload.MaxRetry)) //decompress upload will not support persist + op.RegisterSettingChangingCallback(func() { + fs.ArchiveContentUploadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressUploadThreadsNum, conf.Conf.Tasks.DecompressUpload.Workers))) + }) } diff --git 
a/internal/conf/const.go b/internal/conf/const.go index 0e534350..fa286e46 100644 --- a/internal/conf/const.go +++ b/internal/conf/const.go @@ -115,6 +115,18 @@ const ( FTPImplicitTLS = "ftp_implicit_tls" FTPTLSPrivateKeyPath = "ftp_tls_private_key_path" FTPTLSPublicCertPath = "ftp_tls_public_cert_path" + + // traffic + TaskOfflineDownloadThreadsNum = "offline_download_task_threads_num" + TaskOfflineDownloadTransferThreadsNum = "offline_download_transfer_task_threads_num" + TaskUploadThreadsNum = "upload_task_threads_num" + TaskCopyThreadsNum = "copy_task_threads_num" + TaskDecompressDownloadThreadsNum = "decompress_download_task_threads_num" + TaskDecompressUploadThreadsNum = "decompress_upload_task_threads_num" + StreamMaxClientDownloadSpeed = "max_client_download_speed" + StreamMaxClientUploadSpeed = "max_client_upload_speed" + StreamMaxServerDownloadSpeed = "max_server_download_speed" + StreamMaxServerUploadSpeed = "max_server_upload_speed" ) const ( diff --git a/internal/driver/driver.go b/internal/driver/driver.go index 292f8e6a..05f0fe24 100644 --- a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -77,6 +77,29 @@ type Remove interface { } type Put interface { + // Put a file (provided as a FileStreamer) into the driver + // Besides the most basic upload functionality, the following features also need to be implemented: + // 1. Canceling (when `<-ctx.Done()` returns), by the following methods: + // (1) Use request methods that carry context, such as the following: + // a. http.NewRequestWithContext + // b. resty.Request.SetContext + // c. s3manager.Uploader.UploadWithContext + // d. utils.CopyWithCtx + // (2) Use a `driver.ReaderWithCtx` or a `driver.NewLimitedUploadStream` + // (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process, + // this is typically applicable to chunked uploads. + // 2. Submit upload progress (via `up`) in real-time. 
There are three recommended ways as follows: + // (1) Use `utils.CopyWithCtx` + // (2) Use `driver.ReaderUpdatingProgress` + // (3) Use `driver.Progress` with `io.TeeReader` + // 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream + // in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and + // before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN` + // if your file chunks are sufficiently small (less than about 50KB). + // NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if + // you use a `errgroup.Group` to upload each chunk in parallel, you should consider using a recursive + // mutex like `semaphore.Weighted` to limit the maximum number of upload threads, preventing excessive + // memory usage caused by buffering too many file chunks awaiting upload. Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) error } @@ -113,6 +136,29 @@ type CopyResult interface { } type PutResult interface { + // Put a file (provided as a FileStreamer) into the driver and return the put obj + // Besides the most basic upload functionality, the following features also need to be implemented: + // 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods: + // (1) Use request methods that carry context, such as the following: + // a. http.NewRequestWithContext + // b. resty.Request.SetContext + // c. s3manager.Uploader.UploadWithContext + // d. utils.CopyWithCtx + // (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream` + // (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process, + // this is typically applicable to chunked uploads. + // 2. Submit upload progress (via `up`) in real-time. 
There are three recommended ways as follows: + // (1) Use `utils.CopyWithCtx` + // (2) Use `driver.ReaderUpdatingProgress` + // (3) Use `driver.Progress` with `io.TeeReader` + // 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream + // in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and + // before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN` + // if your file chunks are sufficiently small (less than about 50KB). + // NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if + // you use a `errgroup.Group` to upload each chunk in parallel, you should consider using a recursive + // mutex like `semaphore.Weighted` to limit the maximum number of upload threads, preventing excessive + // memory usage caused by buffering too many file chunks awaiting upload. Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) (model.Obj, error) } @@ -159,28 +205,6 @@ type ArchiveDecompressResult interface { ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) } -type UpdateProgress = model.UpdateProgress - -type Progress struct { - Total int64 - Done int64 - up UpdateProgress -} - -func (p *Progress) Write(b []byte) (n int, err error) { - n = len(b) - p.Done += int64(n) - p.up(float64(p.Done) / float64(p.Total) * 100) - return -} - -func NewProgress(total int64, up UpdateProgress) *Progress { - return &Progress{ - Total: total, - up: up, - } -} - type Reference interface { InitReference(storage Driver) error } diff --git a/internal/driver/utils.go b/internal/driver/utils.go new file mode 100644 index 00000000..2af850ec --- /dev/null +++ b/internal/driver/utils.go @@ -0,0 +1,62 @@ +package driver + +import ( + "context" + "github.com/alist-org/alist/v3/internal/model" + 
"github.com/alist-org/alist/v3/internal/stream" + "io" +) + +type UpdateProgress = model.UpdateProgress + +type Progress struct { + Total int64 + Done int64 + up UpdateProgress +} + +func (p *Progress) Write(b []byte) (n int, err error) { + n = len(b) + p.Done += int64(n) + p.up(float64(p.Done) / float64(p.Total) * 100) + return +} + +func NewProgress(total int64, up UpdateProgress) *Progress { + return &Progress{ + Total: total, + up: up, + } +} + +type RateLimitReader = stream.RateLimitReader + +type RateLimitWriter = stream.RateLimitWriter + +type RateLimitFile = stream.RateLimitFile + +func NewLimitedUploadStream(ctx context.Context, r io.Reader) *RateLimitReader { + return &RateLimitReader{ + Reader: r, + Limiter: stream.ServerUploadLimit, + Ctx: ctx, + } +} + +func NewLimitedUploadFile(ctx context.Context, f model.File) *RateLimitFile { + return &RateLimitFile{ + File: f, + Limiter: stream.ServerUploadLimit, + Ctx: ctx, + } +} + +func ServerUploadLimitWaitN(ctx context.Context, n int) error { + return stream.ServerUploadLimit.WaitN(ctx, n) +} + +type ReaderWithCtx = stream.ReaderWithCtx + +type ReaderUpdatingProgress = stream.ReaderUpdatingProgress + +type SimpleReaderWithSize = stream.SimpleReaderWithSize diff --git a/internal/model/setting.go b/internal/model/setting.go index 9b60d98a..93b81fe5 100644 --- a/internal/model/setting.go +++ b/internal/model/setting.go @@ -12,6 +12,7 @@ const ( LDAP S3 FTP + TRAFFIC ) const ( diff --git a/internal/net/serve.go b/internal/net/serve.go index 6216cd21..c75e611f 100644 --- a/internal/net/serve.go +++ b/internal/net/serve.go @@ -3,6 +3,7 @@ package net import ( "compress/gzip" "context" + "crypto/tls" "fmt" "io" "mime" @@ -14,7 +15,6 @@ import ( "sync" "time" - "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/http_range" @@ -264,7 +264,7 @@ var httpClient *http.Client func HttpClient() 
*http.Client { once.Do(func() { - httpClient = base.NewHttpClient() + httpClient = NewHttpClient() httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { if len(via) >= 10 { return errors.New("stopped after 10 redirects") @@ -275,3 +275,13 @@ func HttpClient() *http.Client { }) return httpClient } + +func NewHttpClient() *http.Client { + return &http.Client{ + Timeout: time.Hour * 48, + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify}, + }, + } +} diff --git a/internal/op/setting.go b/internal/op/setting.go index 50eba3f7..36a792b0 100644 --- a/internal/op/setting.go +++ b/internal/op/setting.go @@ -26,9 +26,18 @@ var settingGroupCacheF = func(key string, item []model.SettingItem) { settingGroupCache.Set(key, item, cache.WithEx[[]model.SettingItem](time.Hour)) } +var settingChangingCallbacks = make([]func(), 0) + +func RegisterSettingChangingCallback(f func()) { + settingChangingCallbacks = append(settingChangingCallbacks, f) +} + func SettingCacheUpdate() { settingCache.Clear() settingGroupCache.Clear() + for _, cb := range settingChangingCallbacks { + cb() + } } func GetPublicSettingsMap() map[string]string { diff --git a/internal/stream/limit.go b/internal/stream/limit.go new file mode 100644 index 00000000..3b32a55f --- /dev/null +++ b/internal/stream/limit.go @@ -0,0 +1,152 @@ +package stream + +import ( + "context" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/http_range" + "github.com/alist-org/alist/v3/pkg/utils" + "golang.org/x/time/rate" + "io" + "time" +) + +type Limiter interface { + Limit() rate.Limit + Burst() int + TokensAt(time.Time) float64 + Tokens() float64 + Allow() bool + AllowN(time.Time, int) bool + Reserve() *rate.Reservation + ReserveN(time.Time, int) *rate.Reservation + Wait(context.Context) error + WaitN(context.Context, int) error + SetLimit(rate.Limit) + SetLimitAt(time.Time, 
rate.Limit) + SetBurst(int) + SetBurstAt(time.Time, int) +} + +var ( + ClientDownloadLimit Limiter + ClientUploadLimit Limiter + ServerDownloadLimit Limiter + ServerUploadLimit Limiter +) + +type RateLimitReader struct { + io.Reader + Limiter Limiter + Ctx context.Context +} + +func (r *RateLimitReader) Read(p []byte) (n int, err error) { + if r.Ctx != nil && utils.IsCanceled(r.Ctx) { + return 0, r.Ctx.Err() + } + n, err = r.Reader.Read(p) + if err != nil { + return + } + if r.Limiter != nil { + if r.Ctx == nil { + r.Ctx = context.Background() + } + err = r.Limiter.WaitN(r.Ctx, n) + } + return +} + +func (r *RateLimitReader) Close() error { + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +type RateLimitWriter struct { + io.Writer + Limiter Limiter + Ctx context.Context +} + +func (w *RateLimitWriter) Write(p []byte) (n int, err error) { + if w.Ctx != nil && utils.IsCanceled(w.Ctx) { + return 0, w.Ctx.Err() + } + n, err = w.Writer.Write(p) + if err != nil { + return + } + if w.Limiter != nil { + if w.Ctx == nil { + w.Ctx = context.Background() + } + err = w.Limiter.WaitN(w.Ctx, n) + } + return +} + +func (w *RateLimitWriter) Close() error { + if c, ok := w.Writer.(io.Closer); ok { + return c.Close() + } + return nil +} + +type RateLimitFile struct { + model.File + Limiter Limiter + Ctx context.Context +} + +func (r *RateLimitFile) Read(p []byte) (n int, err error) { + if r.Ctx != nil && utils.IsCanceled(r.Ctx) { + return 0, r.Ctx.Err() + } + n, err = r.File.Read(p) + if err != nil { + return + } + if r.Limiter != nil { + if r.Ctx == nil { + r.Ctx = context.Background() + } + err = r.Limiter.WaitN(r.Ctx, n) + } + return +} + +func (r *RateLimitFile) ReadAt(p []byte, off int64) (n int, err error) { + if r.Ctx != nil && utils.IsCanceled(r.Ctx) { + return 0, r.Ctx.Err() + } + n, err = r.File.ReadAt(p, off) + if err != nil { + return + } + if r.Limiter != nil { + if r.Ctx == nil { + r.Ctx = context.Background() + } + err = 
r.Limiter.WaitN(r.Ctx, n) + } + return +} + +type RateLimitRangeReadCloser struct { + model.RangeReadCloserIF + Limiter Limiter +} + +func (rrc RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { + rc, err := rrc.RangeReadCloserIF.RangeRead(ctx, httpRange) + if err != nil { + return nil, err + } + return &RateLimitReader{ + Reader: rc, + Limiter: rrc.Limiter, + Ctx: ctx, + }, nil +} diff --git a/internal/stream/stream.go b/internal/stream/stream.go index 74646bfb..5eb6bdc7 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -182,14 +182,24 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) } if ss.Link != nil { if ss.Link.MFile != nil { - ss.mFile = ss.Link.MFile - ss.Reader = ss.Link.MFile - ss.Closers.Add(ss.Link.MFile) + mFile := ss.Link.MFile + if _, ok := mFile.(*os.File); !ok { + mFile = &RateLimitFile{ + File: mFile, + Limiter: ServerDownloadLimit, + Ctx: fs.Ctx, + } + } + ss.mFile = mFile + ss.Reader = mFile + ss.Closers.Add(mFile) return &ss, nil } - if ss.Link.RangeReadCloser != nil { - ss.rangeReadCloser = ss.Link.RangeReadCloser + ss.rangeReadCloser = RateLimitRangeReadCloser{ + RangeReadCloserIF: ss.Link.RangeReadCloser, + Limiter: ServerDownloadLimit, + } ss.Add(ss.rangeReadCloser) return &ss, nil } @@ -198,6 +208,10 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) if err != nil { return nil, err } + rrc = RateLimitRangeReadCloser{ + RangeReadCloserIF: rrc, + Limiter: ServerDownloadLimit, + } ss.rangeReadCloser = rrc ss.Add(rrc) return &ss, nil @@ -259,7 +273,7 @@ func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) { if ss.tmpFile != nil { return ss.tmpFile, nil } - if ss.mFile != nil { + if _, ok := ss.mFile.(*os.File); ok { return ss.mFile, nil } tmpF, err := utils.CreateTempFile(ss, ss.GetSize()) @@ -276,7 +290,7 @@ func (ss *SeekableStream) CacheFullInTempFileAndUpdateProgress(up 
model.UpdatePr if ss.tmpFile != nil { return ss.tmpFile, nil } - if ss.mFile != nil { + if _, ok := ss.mFile.(*os.File); ok { return ss.mFile, nil } tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{ @@ -293,12 +307,13 @@ func (ss *SeekableStream) CacheFullInTempFileAndUpdateProgress(up model.UpdatePr } func (f *FileStream) SetTmpFile(r *os.File) { - f.Reader = r + f.Add(r) f.tmpFile = r + f.Reader = r } type ReaderWithSize interface { - io.Reader + io.ReadCloser GetSize() int64 } @@ -311,6 +326,13 @@ func (r *SimpleReaderWithSize) GetSize() int64 { return r.Size } +func (r *SimpleReaderWithSize) Close() error { + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + type ReaderUpdatingProgress struct { Reader ReaderWithSize model.UpdateProgress @@ -324,6 +346,10 @@ func (r *ReaderUpdatingProgress) Read(p []byte) (n int, err error) { return n, err } +func (r *ReaderUpdatingProgress) Close() error { + return r.Reader.Close() +} + type SStreamReadAtSeeker interface { model.File GetRawStream() *SeekableStream @@ -534,7 +560,7 @@ func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) { func (r *RangeReadReadAtSeeker) Close() error { if r.headCache != nil { - r.headCache.close() + _ = r.headCache.close() } return r.ss.Close() } @@ -562,17 +588,3 @@ func (f *FileReadAtSeeker) Seek(offset int64, whence int) (int64, error) { func (f *FileReadAtSeeker) Close() error { return f.ss.Close() } - -type ReaderWithCtx struct { - io.Reader - Ctx context.Context -} - -func (r *ReaderWithCtx) Read(p []byte) (n int, err error) { - select { - case <-r.Ctx.Done(): - return 0, r.Ctx.Err() - default: - return r.Reader.Read(p) - } -} diff --git a/internal/stream/util.go b/internal/stream/util.go index 16854c38..bb5019e0 100644 --- a/internal/stream/util.go +++ b/internal/stream/util.go @@ -3,6 +3,7 @@ package stream import ( "context" "fmt" + "github.com/alist-org/alist/v3/pkg/utils" "io" "net/http" @@ -76,3 +77,22 @@ func 
checkContentRange(header *http.Header, offset int64) bool { } return false } + +type ReaderWithCtx struct { + io.Reader + Ctx context.Context +} + +func (r *ReaderWithCtx) Read(p []byte) (n int, err error) { + if utils.IsCanceled(r.Ctx) { + return 0, r.Ctx.Err() + } + return r.Reader.Read(p) +} + +func (r *ReaderWithCtx) Close() error { + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/server/common/proxy.go b/server/common/proxy.go index 2d828efd..66854976 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -6,6 +6,7 @@ import ( "io" "net/http" "net/url" + "os" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/net" @@ -23,11 +24,22 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. if contentType != "" { w.Header().Set("Content-Type", contentType) } - http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile) + mFile := link.MFile + if _, ok := mFile.(*os.File); !ok { + mFile = &stream.RateLimitFile{ + File: mFile, + Limiter: stream.ServerDownloadLimit, + Ctx: r.Context(), + } + } + http.ServeContent(w, r, file.GetName(), file.ModTime(), mFile) return nil } else if link.RangeReadCloser != nil { attachFileName(w, file) - net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser) + net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{ + RangeReadCloserIF: link.RangeReadCloser, + Limiter: stream.ServerDownloadLimit, + }) return nil } else if link.Concurrency != 0 || link.PartSize != 0 { attachFileName(w, file) @@ -47,7 +59,10 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. 
rc, err := down.Download(ctx, req) return rc, err } - net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &model.RangeReadCloser{RangeReader: rangeReader}) + net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{ + RangeReadCloserIF: &model.RangeReadCloser{RangeReader: rangeReader}, + Limiter: stream.ServerDownloadLimit, + }) return nil } else { //transparent proxy @@ -65,7 +80,11 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. if r.Method == http.MethodHead { return nil } - _, err = utils.CopyWithBuffer(w, res.Body) + _, err = utils.CopyWithBuffer(w, &stream.RateLimitReader{ + Reader: res.Body, + Limiter: stream.ServerDownloadLimit, + Ctx: r.Context(), + }) if err != nil { return err } diff --git a/server/ftp/fsread.go b/server/ftp/fsread.go index f7e018e0..c051a19d 100644 --- a/server/ftp/fsread.go +++ b/server/ftp/fsread.go @@ -60,7 +60,12 @@ func OpenDownload(ctx context.Context, reqPath string, offset int64) (*FileDownl } func (f *FileDownloadProxy) Read(p []byte) (n int, err error) { - return f.reader.Read(p) + n, err = f.reader.Read(p) + if err != nil { + return + } + err = stream.ClientDownloadLimit.WaitN(f.reader.GetRawStream().Ctx, n) + return } func (f *FileDownloadProxy) Write(p []byte) (n int, err error) { diff --git a/server/ftp/fsup.go b/server/ftp/fsup.go index 4d626d0e..ee38b1bf 100644 --- a/server/ftp/fsup.go +++ b/server/ftp/fsup.go @@ -59,7 +59,12 @@ func (f *FileUploadProxy) Read(p []byte) (n int, err error) { } func (f *FileUploadProxy) Write(p []byte) (n int, err error) { - return f.buffer.Write(p) + n, err = f.buffer.Write(p) + if err != nil { + return + } + err = stream.ClientUploadLimit.WaitN(f.ctx, n) + return } func (f *FileUploadProxy) Seek(offset int64, whence int) (int64, error) { @@ -96,7 +101,6 @@ func (f *FileUploadProxy) Close() error { WebPutAsTask: true, } s.SetTmpFile(f.buffer) - s.Closers.Add(f.buffer) _, err = 
fs.PutAsTask(f.ctx, dir, s) return err } @@ -127,7 +131,7 @@ func (f *FileUploadWithLengthProxy) Read(p []byte) (n int, err error) { return 0, errs.NotSupport } -func (f *FileUploadWithLengthProxy) Write(p []byte) (n int, err error) { +func (f *FileUploadWithLengthProxy) write(p []byte) (n int, err error) { if f.pipeWriter != nil { select { case e := <-f.errChan: @@ -174,6 +178,15 @@ func (f *FileUploadWithLengthProxy) Write(p []byte) (n int, err error) { } } +func (f *FileUploadWithLengthProxy) Write(p []byte) (n int, err error) { + n, err = f.write(p) + if err != nil { + return + } + err = stream.ClientUploadLimit.WaitN(f.ctx, n) + return +} + func (f *FileUploadWithLengthProxy) Seek(offset int64, whence int) (int64, error) { return 0, errs.NotSupport } diff --git a/server/middlewares/limit.go b/server/middlewares/limit.go index 44c079b3..2ccee950 100644 --- a/server/middlewares/limit.go +++ b/server/middlewares/limit.go @@ -1,7 +1,9 @@ package middlewares import ( + "github.com/alist-org/alist/v3/internal/stream" "github.com/gin-gonic/gin" + "io" ) func MaxAllowed(n int) gin.HandlerFunc { @@ -14,3 +16,37 @@ func MaxAllowed(n int) gin.HandlerFunc { c.Next() } } + +func UploadRateLimiter(limiter stream.Limiter) gin.HandlerFunc { + return func(c *gin.Context) { + c.Request.Body = &stream.RateLimitReader{ + Reader: c.Request.Body, + Limiter: limiter, + Ctx: c, + } + c.Next() + } +} + +type ResponseWriterWrapper struct { + gin.ResponseWriter + WrapWriter io.Writer +} + +func (w *ResponseWriterWrapper) Write(p []byte) (n int, err error) { + return w.WrapWriter.Write(p) +} + +func DownloadRateLimiter(limiter stream.Limiter) gin.HandlerFunc { + return func(c *gin.Context) { + c.Writer = &ResponseWriterWrapper{ + ResponseWriter: c.Writer, + WrapWriter: &stream.RateLimitWriter{ + Writer: c.Writer, + Limiter: limiter, + Ctx: c, + }, + } + c.Next() + } +} diff --git a/server/router.go b/server/router.go index 63bad60f..830051d8 100644 --- a/server/router.go +++ 
b/server/router.go @@ -4,6 +4,7 @@ import ( "github.com/alist-org/alist/v3/cmd/flags" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/message" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" "github.com/alist-org/alist/v3/server/handles" @@ -38,13 +39,14 @@ func Init(e *gin.Engine) { WebDav(g.Group("/dav")) S3(g.Group("/s3")) - g.GET("/d/*path", middlewares.Down, handles.Down) - g.GET("/p/*path", middlewares.Down, handles.Proxy) + downloadLimiter := middlewares.DownloadRateLimiter(stream.ClientDownloadLimit) + g.GET("/d/*path", middlewares.Down, downloadLimiter, handles.Down) + g.GET("/p/*path", middlewares.Down, downloadLimiter, handles.Proxy) g.HEAD("/d/*path", middlewares.Down, handles.Down) g.HEAD("/p/*path", middlewares.Down, handles.Proxy) - g.GET("/ad/*path", middlewares.Down, handles.ArchiveDown) - g.GET("/ap/*path", middlewares.Down, handles.ArchiveProxy) - g.GET("/ae/*path", middlewares.Down, handles.ArchiveInternalExtract) + g.GET("/ad/*path", middlewares.Down, downloadLimiter, handles.ArchiveDown) + g.GET("/ap/*path", middlewares.Down, downloadLimiter, handles.ArchiveProxy) + g.GET("/ae/*path", middlewares.Down, downloadLimiter, handles.ArchiveInternalExtract) g.HEAD("/ad/*path", middlewares.Down, handles.ArchiveDown) g.HEAD("/ap/*path", middlewares.Down, handles.ArchiveProxy) g.HEAD("/ae/*path", middlewares.Down, handles.ArchiveInternalExtract) @@ -173,8 +175,9 @@ func _fs(g *gin.RouterGroup) { g.POST("/copy", handles.FsCopy) g.POST("/remove", handles.FsRemove) g.POST("/remove_empty_directory", handles.FsRemoveEmptyDirectory) - g.PUT("/put", middlewares.FsUp, handles.FsStream) - g.PUT("/form", middlewares.FsUp, handles.FsForm) + uploadLimiter := middlewares.UploadRateLimiter(stream.ClientUploadLimit) + g.PUT("/put", middlewares.FsUp, uploadLimiter, handles.FsStream) + g.PUT("/form", middlewares.FsUp, uploadLimiter, 
handles.FsForm) g.POST("/link", middlewares.AuthAdmin, handles.Link) // g.POST("/add_aria2", handles.AddOfflineDownload) // g.POST("/add_qbit", handles.AddQbittorrent) diff --git a/server/webdav.go b/server/webdav.go index cdfdce7d..a735e285 100644 --- a/server/webdav.go +++ b/server/webdav.go @@ -3,6 +3,8 @@ package server import ( "context" "crypto/subtle" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/server/middlewares" "net/http" "path" "strings" @@ -27,8 +29,10 @@ func WebDav(dav *gin.RouterGroup) { }, } dav.Use(WebDAVAuth) - dav.Any("/*path", ServeWebDAV) - dav.Any("", ServeWebDAV) + uploadLimiter := middlewares.UploadRateLimiter(stream.ClientUploadLimit) + downloadLimiter := middlewares.DownloadRateLimiter(stream.ClientDownloadLimit) + dav.Any("/*path", uploadLimiter, downloadLimiter, ServeWebDAV) + dav.Any("", uploadLimiter, downloadLimiter, ServeWebDAV) dav.Handle("PROPFIND", "/*path", ServeWebDAV) dav.Handle("PROPFIND", "", ServeWebDAV) dav.Handle("MKCOL", "/*path", ServeWebDAV) From 30d8c2075630e92fc93c4057bb189b4172219d73 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Sun, 16 Feb 2025 12:24:10 +0800 Subject: [PATCH 119/187] feat(archive): support deprioritize previewing (#7984) --- internal/bootstrap/data/setting.go | 1 + internal/conf/const.go | 17 +++++++++-------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/internal/bootstrap/data/setting.go b/internal/bootstrap/data/setting.go index de3b8af9..026a89e1 100644 --- a/internal/bootstrap/data/setting.go +++ b/internal/bootstrap/data/setting.go @@ -140,6 +140,7 @@ func InitialSettings() []model.SettingItem { {Key: "audio_cover", Value: "https://jsd.nn.ci/gh/alist-org/logo@main/logo.svg", Type: conf.TypeString, Group: model.PREVIEW}, {Key: conf.AudioAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, {Key: conf.VideoAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, + {Key: 
conf.PreviewArchivesByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, // global settings {Key: conf.HideFiles, Value: "/\\/README.md/i", Type: conf.TypeText, Group: model.GLOBAL}, {Key: "package_download", Value: "true", Type: conf.TypeBool, Group: model.GLOBAL}, diff --git a/internal/conf/const.go b/internal/conf/const.go index fa286e46..2234e9bc 100644 --- a/internal/conf/const.go +++ b/internal/conf/const.go @@ -22,14 +22,15 @@ const ( MainColor = "main_color" // preview - TextTypes = "text_types" - AudioTypes = "audio_types" - VideoTypes = "video_types" - ImageTypes = "image_types" - ProxyTypes = "proxy_types" - ProxyIgnoreHeaders = "proxy_ignore_headers" - AudioAutoplay = "audio_autoplay" - VideoAutoplay = "video_autoplay" + TextTypes = "text_types" + AudioTypes = "audio_types" + VideoTypes = "video_types" + ImageTypes = "image_types" + ProxyTypes = "proxy_types" + ProxyIgnoreHeaders = "proxy_ignore_headers" + AudioAutoplay = "audio_autoplay" + VideoAutoplay = "video_autoplay" + PreviewArchivesByDefault = "preview_archives_by_default" // global HideFiles = "hide_files" From c230f24ebedfa96e9cf949df8066914373cd9eef Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Sun, 16 Feb 2025 12:25:01 +0800 Subject: [PATCH 120/187] fix(archive): decode filename when decompressing zips (#7998 close #7988) --- internal/archive/zip/utils.go | 45 +++++++++++++++++++++++++++++++---- internal/archive/zip/zip.go | 2 +- server/handles/archive.go | 2 +- 3 files changed, 43 insertions(+), 6 deletions(-) diff --git a/internal/archive/zip/utils.go b/internal/archive/zip/utils.go index 81b47782..aa51b88e 100644 --- a/internal/archive/zip/utils.go +++ b/internal/archive/zip/utils.go @@ -59,7 +59,7 @@ func _decompress(file *zip.File, targetPath, password string, up model.UpdatePro return err } defer rc.Close() - f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + f, err := 
os.OpenFile(stdpath.Join(targetPath, decodeName(file.FileInfo().Name())), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) if err != nil { return err } @@ -87,12 +87,27 @@ func filterPassword(err error) error { func decodeName(name string) string { b := []byte(name) detector := chardet.NewTextDetector() - result, err := detector.DetectBest(b) + results, err := detector.DetectAll(b) if err != nil { return name } - enc := getEncoding(result.Charset) - if enc == nil { + var ce, re, enc encoding.Encoding + for _, r := range results { + if r.Confidence > 30 { + ce = getCommonEncoding(r.Charset) + if ce != nil { + break + } + } + if re == nil { + re = getEncoding(r.Charset) + } + } + if ce != nil { + enc = ce + } else if re != nil { + enc = re + } else { return name } i := bytes.NewReader(b) @@ -101,8 +116,30 @@ func decodeName(name string) string { return string(content) } +func getCommonEncoding(name string) (enc encoding.Encoding) { + switch name { + case "UTF-8": + enc = unicode.UTF8 + case "UTF-16LE": + enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM) + case "Shift_JIS": + enc = japanese.ShiftJIS + case "GB-18030": + enc = simplifiedchinese.GB18030 + case "EUC-KR": + enc = korean.EUCKR + case "Big5": + enc = traditionalchinese.Big5 + default: + enc = nil + } + return +} + func getEncoding(name string) (enc encoding.Encoding) { switch name { + case "UTF-8": + enc = unicode.UTF8 case "UTF-16BE": enc = unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM) case "UTF-16LE": diff --git a/internal/archive/zip/zip.go b/internal/archive/zip/zip.go index e5285518..9dc8cc76 100644 --- a/internal/archive/zip/zip.go +++ b/internal/archive/zip/zip.go @@ -35,7 +35,6 @@ func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.Ar for _, file := range zipReader.File { if file.IsEncrypted() { encrypted = true - break } name := strings.TrimPrefix(decodeName(file.Name), "/") @@ -70,6 +69,7 @@ func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) 
(model.Ar dirObj.IsFolder = true dirObj.Name = stdpath.Base(dir) dirObj.Modified = file.ModTime() + dirObj.Children = make([]model.ObjTree, 0) } if isNewFolder { // 将 文件夹 添加到 父文件夹 diff --git a/server/handles/archive.go b/server/handles/archive.go index 6ff13641..fab3916e 100644 --- a/server/handles/archive.go +++ b/server/handles/archive.go @@ -39,7 +39,7 @@ type ArchiveMetaResp struct { type ArchiveContentResp struct { ObjResp - Children []ArchiveContentResp `json:"children,omitempty"` + Children []ArchiveContentResp `json:"children"` } func toObjsRespWithoutSignAndThumb(obj model.Obj) ObjResp { From 79bef0be9ee14b1087c4e639015674665c947001 Mon Sep 17 00:00:00 2001 From: KirCute_ECT <951206789@qq.com> Date: Sun, 16 Feb 2025 15:11:48 +0800 Subject: [PATCH 121/187] chore: fix build failed (#8005) --- drivers/ilanzou/driver.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/ilanzou/driver.go b/drivers/ilanzou/driver.go index 697d85b1..39a311dd 100644 --- a/drivers/ilanzou/driver.go +++ b/drivers/ilanzou/driver.go @@ -13,8 +13,6 @@ import ( "strings" "time" - "github.com/alist-org/alist/v3/internal/stream" - "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" From cdc41595bcc1094c8ff2f7a2b7b40763dfb87492 Mon Sep 17 00:00:00 2001 From: KirCute <951206789@qq.com> Date: Mon, 24 Feb 2025 23:12:23 +0800 Subject: [PATCH 122/187] feat(github): support GPG verification (#7996 close #7986) * feat(github): support GPG verification * chore --- drivers/github/driver.go | 134 ++++++++++++++++++++++++--------------- drivers/github/meta.go | 32 +++++----- drivers/github/types.go | 7 +- drivers/github/util.go | 72 ++++++++++++++++++++- go.mod | 2 + go.sum | 15 +++++ 6 files changed, 193 insertions(+), 69 deletions(-) diff --git a/drivers/github/driver.go b/drivers/github/driver.go index d1cfd9fb..dedd4945 100644 --- a/drivers/github/driver.go +++ b/drivers/github/driver.go @@ -3,7 +3,6 
@@ package github import ( "context" "encoding/base64" - "errors" "fmt" "io" "net/http" @@ -12,12 +11,14 @@ import ( "sync" "text/template" + "github.com/ProtonMail/go-crypto/openpgp" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -33,6 +34,7 @@ type Github struct { moveMsgTmpl *template.Template isOnBranch bool commitMutex sync.Mutex + pgpEntity *openpgp.Entity } func (d *Github) Config() driver.Config { @@ -102,6 +104,26 @@ func (d *Github) Init(ctx context.Context) error { _, err = d.getBranchHead() d.isOnBranch = err == nil } + if d.GPGPrivateKey != "" { + if d.CommitterName == "" || d.AuthorName == "" { + user, e := d.getAuthenticatedUser() + if e != nil { + return e + } + if d.CommitterName == "" { + d.CommitterName = user.Name + d.CommitterEmail = user.Email + } + if d.AuthorName == "" { + d.AuthorName = user.Name + d.AuthorEmail = user.Email + } + } + d.pgpEntity, err = loadPrivateKey(d.GPGPrivateKey, d.GPGKeyPassphrase) + if err != nil { + return err + } + } return nil } @@ -174,10 +196,39 @@ func (d *Github) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin if parent.Entries == nil { return errs.NotFolder } - // if parent folder contains .gitkeep only, mark it and delete .gitkeep later - gitKeepSha := "" + subDirSha, err := d.newTree("", []interface{}{ + map[string]string{ + "path": ".gitkeep", + "mode": "100644", + "type": "blob", + "content": "", + }, + }) + if err != nil { + return err + } + newTree := make([]interface{}, 0, 2) + newTree = append(newTree, TreeObjReq{ + Path: dirName, + Mode: "040000", + Type: "tree", + Sha: subDirSha, + }) if len(parent.Entries) == 1 && parent.Entries[0].Name == ".gitkeep" { - gitKeepSha = parent.Entries[0].Sha + newTree = 
append(newTree, TreeObjReq{ + Path: ".gitkeep", + Mode: "100644", + Type: "blob", + Sha: nil, + }) + } + newSha, err := d.newTree(parent.Sha, newTree) + if err != nil { + return err + } + rootSha, err := d.renewParentTrees(parentDir.GetPath(), parent.Sha, newSha, "/") + if err != nil { + return err } commitMessage, err := getMessage(d.mkdirMsgTmpl, &MessageTemplateVars{ @@ -190,13 +241,7 @@ func (d *Github) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin if err != nil { return err } - if err = d.createGitKeep(stdpath.Join(parentDir.GetPath(), dirName), commitMessage); err != nil { - return err - } - if gitKeepSha != "" { - err = d.delete(stdpath.Join(parentDir.GetPath(), ".gitkeep"), gitKeepSha, commitMessage) - } - return err + return d.commit(commitMessage, rootSha) } func (d *Github) Move(ctx context.Context, srcObj, dstDir model.Obj) error { @@ -639,24 +684,6 @@ func (d *Github) get(path string) (*Object, error) { return &resp, err } -func (d *Github) createGitKeep(path, message string) error { - body := map[string]interface{}{ - "message": message, - "content": "", - "branch": d.Ref, - } - d.addCommitterAndAuthor(&body) - - res, err := d.client.R().SetBody(body).Put(d.getContentApiUrl(stdpath.Join(path, ".gitkeep"))) - if err != nil { - return err - } - if res.StatusCode() != 200 && res.StatusCode() != 201 { - return toErr(res) - } - return nil -} - func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.UpdateProgress) (string, error) { beforeContent := "{\"encoding\":\"base64\",\"content\":\"" afterContent := "\"}" @@ -717,23 +744,6 @@ func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.Up return resp.Sha, nil } -func (d *Github) delete(path, sha, message string) error { - body := map[string]interface{}{ - "message": message, - "sha": sha, - "branch": d.Ref, - } - d.addCommitterAndAuthor(&body) - res, err := d.client.R().SetBody(body).Delete(d.getContentApiUrl(path)) - if err != nil { - return 
err - } - if res.StatusCode() != 200 { - return toErr(res) - } - return nil -} - func (d *Github) renewParentTrees(path, prevSha, curSha, until string) (string, error) { for path != until { path = stdpath.Dir(path) @@ -795,11 +805,11 @@ func (d *Github) getTreeDirectly(path string) (*TreeResp, string, error) { } func (d *Github) newTree(baseSha string, tree []interface{}) (string, error) { - res, err := d.client.R(). - SetBody(&TreeReq{ - BaseTree: baseSha, - Trees: tree, - }). + body := &TreeReq{Trees: tree} + if baseSha != "" { + body.BaseTree = baseSha + } + res, err := d.client.R().SetBody(body). Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/trees", d.Owner, d.Repo)) if err != nil { return "", err @@ -822,6 +832,13 @@ func (d *Github) commit(message, treeSha string) error { "parents": []string{oldCommit}, } d.addCommitterAndAuthor(&body) + if d.pgpEntity != nil { + signature, e := signCommit(&body, d.pgpEntity) + if e != nil { + return e + } + body["signature"] = signature + } res, err := d.client.R().SetBody(body).Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/commits", d.Owner, d.Repo)) if err != nil { return err @@ -925,6 +942,21 @@ func (d *Github) getRepo() (*RepoResp, error) { return &resp, nil } +func (d *Github) getAuthenticatedUser() (*UserResp, error) { + res, err := d.client.R().Get("https://api.github.com/user") + if err != nil { + return nil, err + } + if res.StatusCode() != 200 { + return nil, toErr(res) + } + resp := &UserResp{} + if err = utils.Json.Unmarshal(res.Body(), resp); err != nil { + return nil, err + } + return resp, nil +} + func (d *Github) addCommitterAndAuthor(m *map[string]interface{}) { if d.CommitterName != "" { committer := map[string]string{ diff --git a/drivers/github/meta.go b/drivers/github/meta.go index 05e704be..7de8d73c 100644 --- a/drivers/github/meta.go +++ b/drivers/github/meta.go @@ -7,21 +7,23 @@ import ( type Addition struct { driver.RootPath - Token string `json:"token" type:"string"` - Owner 
string `json:"owner" type:"string" required:"true"` - Repo string `json:"repo" type:"string" required:"true"` - Ref string `json:"ref" type:"string" help:"A branch, a tag or a commit SHA, main branch by default."` - GitHubProxy string `json:"gh_proxy" type:"string" help:"GitHub proxy, e.g. https://ghproxy.net/raw.githubusercontent.com or https://gh-proxy.com/raw.githubusercontent.com"` - CommitterName string `json:"committer_name" type:"string"` - CommitterEmail string `json:"committer_email" type:"string"` - AuthorName string `json:"author_name" type:"string"` - AuthorEmail string `json:"author_email" type:"string"` - MkdirCommitMsg string `json:"mkdir_commit_message" type:"text" default:"{{.UserName}} mkdir {{.ObjPath}}"` - DeleteCommitMsg string `json:"delete_commit_message" type:"text" default:"{{.UserName}} remove {{.ObjPath}}"` - PutCommitMsg string `json:"put_commit_message" type:"text" default:"{{.UserName}} upload {{.ObjPath}}"` - RenameCommitMsg string `json:"rename_commit_message" type:"text" default:"{{.UserName}} rename {{.ObjPath}} to {{.TargetName}}"` - CopyCommitMsg string `json:"copy_commit_message" type:"text" default:"{{.UserName}} copy {{.ObjPath}} to {{.TargetPath}}"` - MoveCommitMsg string `json:"move_commit_message" type:"text" default:"{{.UserName}} move {{.ObjPath}} to {{.TargetPath}}"` + Token string `json:"token" type:"string" required:"true"` + Owner string `json:"owner" type:"string" required:"true"` + Repo string `json:"repo" type:"string" required:"true"` + Ref string `json:"ref" type:"string" help:"A branch, a tag or a commit SHA, main branch by default."` + GitHubProxy string `json:"gh_proxy" type:"string" help:"GitHub proxy, e.g. 
https://ghproxy.net/raw.githubusercontent.com or https://gh-proxy.com/raw.githubusercontent.com"` + GPGPrivateKey string `json:"gpg_private_key" type:"text"` + GPGKeyPassphrase string `json:"gpg_key_passphrase" type:"string"` + CommitterName string `json:"committer_name" type:"string"` + CommitterEmail string `json:"committer_email" type:"string"` + AuthorName string `json:"author_name" type:"string"` + AuthorEmail string `json:"author_email" type:"string"` + MkdirCommitMsg string `json:"mkdir_commit_message" type:"text" default:"{{.UserName}} mkdir {{.ObjPath}}"` + DeleteCommitMsg string `json:"delete_commit_message" type:"text" default:"{{.UserName}} remove {{.ObjPath}}"` + PutCommitMsg string `json:"put_commit_message" type:"text" default:"{{.UserName}} upload {{.ObjPath}}"` + RenameCommitMsg string `json:"rename_commit_message" type:"text" default:"{{.UserName}} rename {{.ObjPath}} to {{.TargetName}}"` + CopyCommitMsg string `json:"copy_commit_message" type:"text" default:"{{.UserName}} copy {{.ObjPath}} to {{.TargetPath}}"` + MoveCommitMsg string `json:"move_commit_message" type:"text" default:"{{.UserName}} move {{.ObjPath}} to {{.TargetPath}}"` } var config = driver.Config{ diff --git a/drivers/github/types.go b/drivers/github/types.go index 425f8979..b057385c 100644 --- a/drivers/github/types.go +++ b/drivers/github/types.go @@ -79,7 +79,7 @@ type TreeResp struct { } type TreeReq struct { - BaseTree string `json:"base_tree"` + BaseTree interface{} `json:"base_tree,omitempty"` Trees []interface{} `json:"tree"` } @@ -100,3 +100,8 @@ type UpdateRefReq struct { type RepoResp struct { DefaultBranch string `json:"default_branch"` } + +type UserResp struct { + Name string `json:"name"` + Email string `json:"email"` +} diff --git a/drivers/github/util.go b/drivers/github/util.go index 85bc3cb9..03318784 100644 --- a/drivers/github/util.go +++ b/drivers/github/util.go @@ -1,14 +1,20 @@ package github import ( + "bytes" "context" "errors" "fmt" + "io" + "strings" + 
"text/template" + "time" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/armor" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" - "strings" - "text/template" ) type MessageTemplateVars struct { @@ -97,3 +103,65 @@ func getUsername(ctx context.Context) string { } return user.Username } + +func loadPrivateKey(key, passphrase string) (*openpgp.Entity, error) { + entityList, err := openpgp.ReadArmoredKeyRing(strings.NewReader(key)) + if err != nil { + return nil, err + } + if len(entityList) < 1 { + return nil, fmt.Errorf("no keys found in key ring") + } + entity := entityList[0] + + pass := []byte(passphrase) + if entity.PrivateKey != nil && entity.PrivateKey.Encrypted { + if err = entity.PrivateKey.Decrypt(pass); err != nil { + return nil, fmt.Errorf("password incorrect: %+v", err) + } + } + for _, subKey := range entity.Subkeys { + if subKey.PrivateKey != nil && subKey.PrivateKey.Encrypted { + if err = subKey.PrivateKey.Decrypt(pass); err != nil { + return nil, fmt.Errorf("password incorrect: %+v", err) + } + } + } + return entity, nil +} + +func signCommit(m *map[string]interface{}, entity *openpgp.Entity) (string, error) { + var commit strings.Builder + commit.WriteString(fmt.Sprintf("tree %s\n", (*m)["tree"].(string))) + parents := (*m)["parents"].([]string) + for _, p := range parents { + commit.WriteString(fmt.Sprintf("parent %s\n", p)) + } + now := time.Now() + _, offset := now.Zone() + hour := offset / 3600 + author := (*m)["author"].(map[string]string) + commit.WriteString(fmt.Sprintf("author %s <%s> %d %+03d00\n", author["name"], author["email"], now.Unix(), hour)) + author["date"] = now.Format(time.RFC3339) + committer := (*m)["committer"].(map[string]string) + commit.WriteString(fmt.Sprintf("committer %s <%s> %d %+03d00\n", committer["name"], committer["email"], now.Unix(), hour)) + committer["date"] = now.Format(time.RFC3339) + 
commit.WriteString(fmt.Sprintf("\n%s", (*m)["message"].(string))) + data := commit.String() + + var sigBuffer bytes.Buffer + err := openpgp.DetachSign(&sigBuffer, entity, strings.NewReader(data), nil) + if err != nil { + return "", fmt.Errorf("signing failed: %v", err) + } + var armoredSig bytes.Buffer + armorWriter, err := armor.Encode(&armoredSig, "PGP SIGNATURE", nil) + if err != nil { + return "", err + } + if _, err = io.Copy(armorWriter, &sigBuffer); err != nil { + return "", err + } + _ = armorWriter.Close() + return armoredSig.String(), nil +} diff --git a/go.mod b/go.mod index 7bf8a4bb..fad15501 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ toolchain go1.23.1 require ( github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 github.com/KirCute/sftpd-alist v0.0.12 + github.com/ProtonMail/go-crypto v1.0.0 github.com/SheltonZhu/115driver v1.0.34 github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4 @@ -90,6 +91,7 @@ require ( github.com/bytedance/sonic/loader v0.1.1 // indirect github.com/charmbracelet/x/ansi v0.2.3 // indirect github.com/charmbracelet/x/term v0.2.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect github.com/cloudwego/base64x v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect diff --git a/go.sum b/go.sum index a51e0c6a..4237df78 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,8 @@ github.com/KirCute/sftpd-alist v0.0.12 h1:GNVM5QLbQLAfXP4wGUlXFA2IO6fVek0n0IsGnO github.com/KirCute/sftpd-alist v0.0.12/go.mod h1:2wNK7yyW2XfjyJq10OY6xB4COLac64hOwfV6clDJn6s= github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE= github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc= +github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= 
+github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM= github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg= @@ -118,6 +120,7 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4= github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= @@ -147,6 +150,9 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e h1:GLC8iDDcbt1H8+RkNao2nRGjyNTIo81e1rAJT9/uWYA= github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.mod h1:ln9Whp+wVY/FTbn2SK0ag+SKD2fC0yQCF/Lqowc1LmU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= github.com/cloudwego/base64x v0.1.4/go.mod 
h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= @@ -643,6 +649,8 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= @@ -706,8 +714,10 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= @@ -764,6 +774,8 @@ golang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -779,7 +791,9 @@ golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXct golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= @@ -797,6 +811,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text 
v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= From 646c7bcd21d8c00b75f8e12ecc275fcdf5689dc9 Mon Sep 17 00:00:00 2001 From: KirCute <951206789@qq.com> Date: Sat, 1 Mar 2025 18:34:33 +0800 Subject: [PATCH 123/187] fix(archive): use another sign for extraction (#7982) --- internal/sign/archive.go | 41 ++++++++++++++++++++++++++++++++++++++ server/debug.go | 3 ++- server/handles/archive.go | 2 +- server/middlewares/down.go | 41 +++++++++++++++++++------------------- server/router.go | 23 +++++++++++---------- 5 files changed, 78 insertions(+), 32 deletions(-) create mode 100644 internal/sign/archive.go diff --git a/internal/sign/archive.go b/internal/sign/archive.go new file mode 100644 index 00000000..26a2c208 --- /dev/null +++ b/internal/sign/archive.go @@ -0,0 +1,41 @@ +package sign + +import ( + "sync" + "time" + + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/pkg/sign" +) + +var onceArchive sync.Once +var instanceArchive sign.Sign + +func SignArchive(data string) string { + expire := setting.GetInt(conf.LinkExpiration, 0) + if expire == 0 { + return NotExpiredArchive(data) + } else { + return WithDurationArchive(data, time.Duration(expire)*time.Hour) + } +} + +func WithDurationArchive(data string, d time.Duration) string { + onceArchive.Do(InstanceArchive) + return instanceArchive.Sign(data, time.Now().Add(d).Unix()) +} + +func NotExpiredArchive(data string) string { + onceArchive.Do(InstanceArchive) + return instanceArchive.Sign(data, 0) +} + +func VerifyArchive(data string, sign string) error { + onceArchive.Do(InstanceArchive) + return instanceArchive.Verify(data, sign) +} + +func InstanceArchive() { + instanceArchive = 
sign.NewHMACSign([]byte(setting.GetStr(conf.Token) + "-archive")) +} diff --git a/server/debug.go b/server/debug.go index 081ef8c3..a4242abd 100644 --- a/server/debug.go +++ b/server/debug.go @@ -5,6 +5,7 @@ import ( _ "net/http/pprof" "runtime" + "github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/server/common" "github.com/alist-org/alist/v3/server/middlewares" "github.com/gin-gonic/gin" @@ -15,7 +16,7 @@ func _pprof(g *gin.RouterGroup) { } func debug(g *gin.RouterGroup) { - g.GET("/path/*path", middlewares.Down, func(ctx *gin.Context) { + g.GET("/path/*path", middlewares.Down(sign.Verify), func(ctx *gin.Context) { rawPath := ctx.MustGet("path").(string) ctx.JSON(200, gin.H{ "path": rawPath, diff --git a/server/handles/archive.go b/server/handles/archive.go index fab3916e..4ec933e1 100644 --- a/server/handles/archive.go +++ b/server/handles/archive.go @@ -120,7 +120,7 @@ func FsArchiveMeta(c *gin.Context) { } s := "" if isEncrypt(meta, reqPath) || setting.GetBool(conf.SignAll) { - s = sign.Sign(reqPath) + s = sign.SignArchive(reqPath) } api := "/ae" if ret.DriverProviding { diff --git a/server/middlewares/down.go b/server/middlewares/down.go index 05e9dc85..d015672d 100644 --- a/server/middlewares/down.go +++ b/server/middlewares/down.go @@ -9,35 +9,36 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" - "github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" "github.com/gin-gonic/gin" "github.com/pkg/errors" ) -func Down(c *gin.Context) { - rawPath := parsePath(c.Param("path")) - c.Set("path", rawPath) - meta, err := op.GetNearestMeta(rawPath) - if err != nil { - if !errors.Is(errors.Cause(err), errs.MetaNotFound) { - common.ErrorResp(c, err, 500, true) - return - } - } - c.Set("meta", meta) - // verify sign - if needSign(meta, rawPath) { - s := c.Query("sign") - 
err = sign.Verify(rawPath, strings.TrimSuffix(s, "/")) +func Down(verifyFunc func(string, string) error) func(c *gin.Context) { + return func(c *gin.Context) { + rawPath := parsePath(c.Param("path")) + c.Set("path", rawPath) + meta, err := op.GetNearestMeta(rawPath) if err != nil { - common.ErrorResp(c, err, 401) - c.Abort() - return + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + common.ErrorResp(c, err, 500, true) + return + } } + c.Set("meta", meta) + // verify sign + if needSign(meta, rawPath) { + s := c.Query("sign") + err = verifyFunc(rawPath, strings.TrimSuffix(s, "/")) + if err != nil { + common.ErrorResp(c, err, 401) + c.Abort() + return + } + } + c.Next() } - c.Next() } // TODO: implement diff --git a/server/router.go b/server/router.go index 830051d8..2dd6ee88 100644 --- a/server/router.go +++ b/server/router.go @@ -4,6 +4,7 @@ import ( "github.com/alist-org/alist/v3/cmd/flags" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/message" + "github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" @@ -40,16 +41,18 @@ func Init(e *gin.Engine) { S3(g.Group("/s3")) downloadLimiter := middlewares.DownloadRateLimiter(stream.ClientDownloadLimit) - g.GET("/d/*path", middlewares.Down, downloadLimiter, handles.Down) - g.GET("/p/*path", middlewares.Down, downloadLimiter, handles.Proxy) - g.HEAD("/d/*path", middlewares.Down, handles.Down) - g.HEAD("/p/*path", middlewares.Down, handles.Proxy) - g.GET("/ad/*path", middlewares.Down, downloadLimiter, handles.ArchiveDown) - g.GET("/ap/*path", middlewares.Down, downloadLimiter, handles.ArchiveProxy) - g.GET("/ae/*path", middlewares.Down, downloadLimiter, handles.ArchiveInternalExtract) - g.HEAD("/ad/*path", middlewares.Down, handles.ArchiveDown) - g.HEAD("/ap/*path", middlewares.Down, handles.ArchiveProxy) - g.HEAD("/ae/*path", middlewares.Down, 
handles.ArchiveInternalExtract) + signCheck := middlewares.Down(sign.Verify) + g.GET("/d/*path", signCheck, downloadLimiter, handles.Down) + g.GET("/p/*path", signCheck, downloadLimiter, handles.Proxy) + g.HEAD("/d/*path", signCheck, handles.Down) + g.HEAD("/p/*path", signCheck, handles.Proxy) + archiveSignCheck := middlewares.Down(sign.VerifyArchive) + g.GET("/ad/*path", archiveSignCheck, downloadLimiter, handles.ArchiveDown) + g.GET("/ap/*path", archiveSignCheck, downloadLimiter, handles.ArchiveProxy) + g.GET("/ae/*path", archiveSignCheck, downloadLimiter, handles.ArchiveInternalExtract) + g.HEAD("/ad/*path", archiveSignCheck, handles.ArchiveDown) + g.HEAD("/ap/*path", archiveSignCheck, handles.ArchiveProxy) + g.HEAD("/ae/*path", archiveSignCheck, handles.ArchiveInternalExtract) api := g.Group("/api") auth := api.Group("", middlewares.Auth) From 4145734c1883f8c2b29618ababc69f4527419cd0 Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Sat, 1 Mar 2025 18:35:34 +0800 Subject: [PATCH 124/187] refactor(net): pass request header (#8031 close #8008) * refactor(net): pass request header * feat(proxy): add `Etag` to response header * refactor --- drivers/alias/util.go | 1 + drivers/crypt/driver.go | 8 +------- drivers/quark_uc/util.go | 2 +- drivers/quqi/util.go | 4 ---- internal/net/request.go | 3 +++ internal/net/serve.go | 2 +- internal/net/util.go | 15 +++++++++++++-- internal/stream/stream.go | 4 ++-- internal/stream/util.go | 14 +++++++++++--- pkg/utils/io.go | 6 ------ server/common/proxy.go | 24 +++++++++++++++++++----- server/s3/backend.go | 6 +----- server/webdav/prop.go | 4 ++-- server/webdav/webdav.go | 7 +------ 14 files changed, 56 insertions(+), 44 deletions(-) diff --git a/drivers/alias/util.go b/drivers/alias/util.go index ee17b622..2157a43d 100644 --- a/drivers/alias/util.go +++ b/drivers/alias/util.go @@ -63,6 +63,7 @@ func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Ob Size: 
obj.GetSize(), Modified: obj.ModTime(), IsFolder: obj.IsDir(), + HashInfo: obj.GetHash(), }, nil } diff --git a/drivers/crypt/driver.go b/drivers/crypt/driver.go index e6f253d1..59b25806 100644 --- a/drivers/crypt/driver.go +++ b/drivers/crypt/driver.go @@ -263,12 +263,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( } rrc := remoteLink.RangeReadCloser if len(remoteLink.URL) > 0 { - - rangedRemoteLink := &model.Link{ - URL: remoteLink.URL, - Header: remoteLink.Header, - } - var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink) + var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, remoteLink) if err != nil { return nil, err } @@ -304,7 +299,6 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers} resultLink := &model.Link{ - Header: remoteLink.Header, RangeReadCloser: resultRangeReadCloser, Expiration: remoteLink.Expiration, } diff --git a/drivers/quark_uc/util.go b/drivers/quark_uc/util.go index 9a3bdc1c..c5845cc6 100644 --- a/drivers/quark_uc/util.go +++ b/drivers/quark_uc/util.go @@ -170,7 +170,7 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit if res.StatusCode() != 200 { return "", fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String()) } - return res.Header().Get("ETag"), nil + return res.Header().Get("Etag"), nil } func (d *QuarkOrUC) upCommit(pre UpPreResp, md5s []string) error { diff --git a/drivers/quqi/util.go b/drivers/quqi/util.go index c57e641b..5ad43c4b 100644 --- a/drivers/quqi/util.go +++ b/drivers/quqi/util.go @@ -304,10 +304,6 @@ func (d *Quqi) linkFromCDN(id string) (*model.Link, error) { } return &model.Link{ - Header: http.Header{ - "Origin": []string{"https://quqi.com"}, - "Cookie": []string{d.Cookie}, - }, RangeReadCloser: &model.RangeReadCloser{RangeReader: resultRangeReader, 
Closers: remoteClosers}, Expiration: &expiration, }, nil diff --git a/internal/net/request.go b/internal/net/request.go index d2f3028f..c9ef363f 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -382,6 +382,9 @@ func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int if resp == nil { return 0, err } + if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable { + return 0, err + } if ch.id == 0 { //第1个任务 有限的重试,超过重试就会结束请求 switch resp.StatusCode { default: diff --git a/internal/net/serve.go b/internal/net/serve.go index c75e611f..8b6b3d1d 100644 --- a/internal/net/serve.go +++ b/internal/net/serve.go @@ -114,7 +114,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time // 使用请求的Context // 不然从sendContent读不到数据,即使请求断开CopyBuffer也会一直堵塞 - ctx := r.Context() + ctx := context.WithValue(r.Context(), "request_header", &r.Header) switch { case len(ranges) == 0: reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1}) diff --git a/internal/net/util.go b/internal/net/util.go index 45301dde..5b335a7f 100644 --- a/internal/net/util.go +++ b/internal/net/util.go @@ -71,6 +71,7 @@ func checkIfMatch(w http.ResponseWriter, r *http.Request) condResult { if im == "" { return condNone } + r.Header.Del("If-Match") for { im = textproto.TrimString(im) if len(im) == 0 { @@ -98,7 +99,11 @@ func checkIfMatch(w http.ResponseWriter, r *http.Request) condResult { func checkIfUnmodifiedSince(r *http.Request, modtime time.Time) condResult { ius := r.Header.Get("If-Unmodified-Since") - if ius == "" || isZeroTime(modtime) { + if ius == "" { + return condNone + } + r.Header.Del("If-Unmodified-Since") + if isZeroTime(modtime) { return condNone } t, err := http.ParseTime(ius) @@ -120,6 +125,7 @@ func checkIfNoneMatch(w http.ResponseWriter, r *http.Request) condResult { if inm == "" { return condNone } + r.Header.Del("If-None-Match") buf := inm for { buf = textproto.TrimString(buf) @@ -150,7 +156,11 @@ func 
checkIfModifiedSince(r *http.Request, modtime time.Time) condResult { return condNone } ims := r.Header.Get("If-Modified-Since") - if ims == "" || isZeroTime(modtime) { + if ims == "" { + return condNone + } + r.Header.Del("If-Modified-Since") + if isZeroTime(modtime) { return condNone } t, err := http.ParseTime(ims) @@ -174,6 +184,7 @@ func checkIfRange(w http.ResponseWriter, r *http.Request, modtime time.Time) con if ir == "" { return condNone } + r.Header.Del("If-Range") etag, _ := scanETag(ir) if etag != "" { if etagStrongMatch(etag, w.Header().Get("Etag")) { diff --git a/internal/stream/stream.go b/internal/stream/stream.go index 5eb6bdc7..1c94715f 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -384,7 +384,7 @@ func (c *headCache) read(p []byte) (n int, err error) { n, err = lr.Read(buf[off:]) off += n c.cur += int64(n) - if err == io.EOF && n == int(bufL) { + if err == io.EOF && off == int(bufL) { err = nil } if err != nil { @@ -468,7 +468,7 @@ func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error) } } if rc != nil && off-rc.cur <= utils.MB { - n, err := utils.CopyWithBufferN(utils.NullWriter{}, rc.reader, off-rc.cur) + n, err := utils.CopyWithBufferN(io.Discard, rc.reader, off-rc.cur) rc.cur += n if err == io.EOF && rc.cur == off { err = nil diff --git a/internal/stream/util.go b/internal/stream/util.go index bb5019e0..b2c76754 100644 --- a/internal/stream/util.go +++ b/internal/stream/util.go @@ -3,13 +3,13 @@ package stream import ( "context" "fmt" - "github.com/alist-org/alist/v3/pkg/utils" "io" "net/http" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/net" "github.com/alist-org/alist/v3/pkg/http_range" + "github.com/alist-org/alist/v3/pkg/utils" log "github.com/sirupsen/logrus" ) @@ -19,7 +19,11 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl } rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, 
error) { if link.Concurrency != 0 || link.PartSize != 0 { - header := net.ProcessHeader(http.Header{}, link.Header) + requestHeader := ctx.Value("request_header") + if requestHeader == nil { + requestHeader = &http.Header{} + } + header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header) down := net.NewDownloader(func(d *net.Downloader) { d.Concurrency = link.Concurrency d.PartSize = link.PartSize @@ -60,7 +64,11 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl } func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int64) (*http.Response, error) { - header := net.ProcessHeader(http.Header{}, link.Header) + requestHeader := ctx.Value("request_header") + if requestHeader == nil { + requestHeader = &http.Header{} + } + header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header) header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header) return net.RequestHttp(ctx, "GET", header, link.URL) diff --git a/pkg/utils/io.go b/pkg/utils/io.go index c314307d..e06fb235 100644 --- a/pkg/utils/io.go +++ b/pkg/utils/io.go @@ -233,9 +233,3 @@ func CopyWithBufferN(dst io.Writer, src io.Reader, n int64) (written int64, err } return } - -type NullWriter struct{} - -func (NullWriter) Write(p []byte) (n int, err error) { - return len(p), nil -} diff --git a/server/common/proxy.go b/server/common/proxy.go index 66854976..8519ed53 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -19,7 +19,7 @@ import ( func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error { if link.MFile != nil { defer link.MFile.Close() - attachFileName(w, file) + attachHeader(w, file) contentType := link.Header.Get("Content-Type") if contentType != "" { w.Header().Set("Content-Type", contentType) @@ -35,17 +35,21 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. 
http.ServeContent(w, r, file.GetName(), file.ModTime(), mFile) return nil } else if link.RangeReadCloser != nil { - attachFileName(w, file) + attachHeader(w, file) net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{ RangeReadCloserIF: link.RangeReadCloser, Limiter: stream.ServerDownloadLimit, }) return nil } else if link.Concurrency != 0 || link.PartSize != 0 { - attachFileName(w, file) + attachHeader(w, file) size := file.GetSize() - header := net.ProcessHeader(r.Header, link.Header) rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { + requestHeader := ctx.Value("request_header") + if requestHeader == nil { + requestHeader = &http.Header{} + } + header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header) down := net.NewDownloader(func(d *net.Downloader) { d.Concurrency = link.Concurrency d.PartSize = link.PartSize @@ -91,10 +95,20 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. 
return nil } } -func attachFileName(w http.ResponseWriter, file model.Obj) { +func attachHeader(w http.ResponseWriter, file model.Obj) { fileName := file.GetName() w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, fileName, url.PathEscape(fileName))) w.Header().Set("Content-Type", utils.GetMimeType(fileName)) + w.Header().Set("Etag", GetEtag(file)) +} +func GetEtag(file model.Obj) string { + for _, v := range file.GetHash().Export() { + if len(v) != 0 { + return fmt.Sprintf(`"%s"`, v) + } + } + // 参考nginx + return fmt.Sprintf(`"%x-%x"`, file.ModTime().Unix(), file.GetSize()) } var NoProxyRange = &model.RangeReadCloser{} diff --git a/server/s3/backend.go b/server/s3/backend.go index bca45008..a1e99044 100644 --- a/server/s3/backend.go +++ b/server/s3/backend.go @@ -195,11 +195,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string } rrc := link.RangeReadCloser if len(link.URL) > 0 { - rangedRemoteLink := &model.Link{ - URL: link.URL, - Header: link.Header, - } - var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink) + var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, link) if err != nil { return nil, err } diff --git a/server/webdav/prop.go b/server/webdav/prop.go index b1474ea3..5e053af4 100644 --- a/server/webdav/prop.go +++ b/server/webdav/prop.go @@ -9,7 +9,6 @@ import ( "context" "encoding/xml" "errors" - "fmt" "mime" "net/http" "path" @@ -18,6 +17,7 @@ import ( "time" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/server/common" ) // Proppatch describes a property update instruction as defined in RFC 4918. @@ -473,7 +473,7 @@ func findETag(ctx context.Context, ls LockSystem, name string, fi model.Obj) (st // The Apache http 2.4 web server by default concatenates the // modification time and size of a file. We replicate the heuristic // with nanosecond granularity. 
- return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.GetSize()), nil + return common.GetEtag(fi), nil } func findSupportedLock(ctx context.Context, ls LockSystem, name string, fi model.Obj) (string, error) { diff --git a/server/webdav/webdav.go b/server/webdav/webdav.go index 6585056b..1b7ec6ff 100644 --- a/server/webdav/webdav.go +++ b/server/webdav/webdav.go @@ -227,11 +227,6 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta if err != nil { return http.StatusNotFound, err } - etag, err := findETag(ctx, h.LockSystem, reqPath, fi) - if err != nil { - return http.StatusInternalServerError, err - } - w.Header().Set("ETag", etag) if r.Method == http.MethodHead { w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.GetSize())) return http.StatusOK, nil @@ -361,7 +356,7 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, if err != nil { return http.StatusInternalServerError, err } - w.Header().Set("ETag", etag) + w.Header().Set("Etag", etag) return http.StatusCreated, nil } From 2570707a0619bbc377df0b0cd94233d3ea788f5e Mon Sep 17 00:00:00 2001 From: Ljcbaby <46277145+ljcbaby@users.noreply.github.com> Date: Sat, 1 Mar 2025 18:46:05 +0800 Subject: [PATCH 125/187] feat(baidu_netdisk): support dynamical slice size for low bandwith upload case (#7965) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 动态分片尺寸 * 补充严格测试结果 --- drivers/baidu_netdisk/driver.go | 4 +- drivers/baidu_netdisk/meta.go | 21 ++++----- drivers/baidu_netdisk/util.go | 78 +++++++++++++++++++++++++++------ 3 files changed, 77 insertions(+), 26 deletions(-) diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index e0ba98fa..a07ef742 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -189,7 +189,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F } streamSize := stream.GetSize() - sliceSize := 
d.getSliceSize() + sliceSize := d.getSliceSize(streamSize) count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1)) lastBlockSize := streamSize % sliceSize if streamSize > 0 && lastBlockSize == 0 { @@ -197,7 +197,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F } //cal md5 for first 256k data - const SliceSize int64 = 256 * 1024 + const SliceSize int64 = 256 * utils.KB // cal md5 blockList := make([]string, 0, count) byteSize := sliceSize diff --git a/drivers/baidu_netdisk/meta.go b/drivers/baidu_netdisk/meta.go index bf2aed5a..e9226a0d 100644 --- a/drivers/baidu_netdisk/meta.go +++ b/drivers/baidu_netdisk/meta.go @@ -8,16 +8,17 @@ import ( type Addition struct { RefreshToken string `json:"refresh_token" required:"true"` driver.RootPath - OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"` - OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"` - DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"` - ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"` - ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"` - CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"` - AccessToken string - UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"` - UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"` - CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"` + OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"` + OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"` + DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"` + ClientID string `json:"client_id" required:"true" 
default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"` + ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"` + CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"` + AccessToken string + UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"` + UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"` + CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"` + LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"` } var config = driver.Config{ diff --git a/drivers/baidu_netdisk/util.go b/drivers/baidu_netdisk/util.go index ca1a6805..a4fc13f8 100644 --- a/drivers/baidu_netdisk/util.go +++ b/drivers/baidu_netdisk/util.go @@ -136,7 +136,7 @@ func (d *BaiduNetdisk) getFiles(dir string) ([]File, error) { return res, nil } -func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model.Link, error) { +func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Link, error) { var resp DownloadResp params := map[string]string{ "method": "filemetas", @@ -164,7 +164,7 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model }, nil } -func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Link, error) { +func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link, error) { var resp DownloadResp2 param := map[string]string{ "target": fmt.Sprintf("[\"%s\"]", file.GetPath()), @@ -230,22 +230,72 @@ func joinTime(form map[string]string, ctime, mtime int64) { const ( DefaultSliceSize int64 = 4 * utils.MB - VipSliceSize = 16 * utils.MB - SVipSliceSize = 32 * utils.MB + VipSliceSize int64 = 16 * utils.MB + SVipSliceSize int64 = 32 * utils.MB + + MaxSliceNum = 2048 // 文档写的是 1024/没写 ,但实际测试是 2048 + SliceStep int64 = 1 * utils.MB ) -func (d *BaiduNetdisk) getSliceSize() int64 { - if d.CustomUploadPartSize != 0 { - 
return d.CustomUploadPartSize - } - switch d.vipType { - case 1: - return VipSliceSize - case 2: - return SVipSliceSize - default: +func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 { + // 非会员固定为 4MB + if d.vipType == 0 { + if d.CustomUploadPartSize != 0 { + log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize") + } + if filesize > MaxSliceNum*DefaultSliceSize { + log.Warnf("File size(%d) is too large, may cause upload failure", filesize) + } + return DefaultSliceSize } + + if d.CustomUploadPartSize != 0 { + if d.CustomUploadPartSize < DefaultSliceSize { + log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize) + return DefaultSliceSize + } + + if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize { + log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize) + return VipSliceSize + } + + if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize { + log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize) + return SVipSliceSize + } + + return d.CustomUploadPartSize + } + + maxSliceSize := DefaultSliceSize + + switch d.vipType { + case 1: + maxSliceSize = VipSliceSize + case 2: + maxSliceSize = SVipSliceSize + } + + // upload on low bandwidth + if d.LowBandwithUploadMode { + size := DefaultSliceSize + + for size <= maxSliceSize { + if filesize <= MaxSliceNum*size { + return size + } + + size += SliceStep + } + } + + if filesize > MaxSliceNum*maxSliceSize { + log.Warnf("File size(%d) is too large, may cause upload failure", filesize) + } + + return maxSliceSize } // func encodeURIComponent(str string) string { From 370a6c15a96503d5a3688a723e9c7a6735f6b2b0 Mon Sep 17 00:00:00 2001 From: Ljcbaby <46277145+ljcbaby@users.noreply.github.com> Date: Sat, 1 Mar 2025 19:00:36 +0800 Subject: [PATCH 126/187] 
fix(baidu_netdisk): remove duplicate retry (#7972) --- drivers/baidu_netdisk/driver.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index a07ef742..264f3b02 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -20,7 +20,6 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/errgroup" "github.com/alist-org/alist/v3/pkg/utils" - "github.com/avast/retry-go" log "github.com/sirupsen/logrus" ) @@ -261,10 +260,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F } } // step.2 上传分片 - threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread, - retry.Attempts(3), - retry.Delay(time.Second), - retry.DelayType(retry.BackOffDelay)) + threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread) sem := semaphore.NewWeighted(3) for i, partseq := range precreateResp.BlockList { if utils.IsCanceled(upCtx) { From 5dfea714d816e2da04782843f3ab4ee00a53e755 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sat, 15 Mar 2025 00:12:15 +0800 Subject: [PATCH 127/187] fix(cloudreve): use milliseconds timestamp in last_modified (#8133) --- drivers/cloudreve/driver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cloudreve/driver.go b/drivers/cloudreve/driver.go index 73fc3fea..33ef7ddc 100644 --- a/drivers/cloudreve/driver.go +++ b/drivers/cloudreve/driver.go @@ -149,7 +149,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File "size": stream.GetSize(), "name": stream.GetName(), "policy_id": r.Policy.Id, - "last_modified": stream.ModTime().Unix(), + "last_modified": stream.ModTime().UnixMilli(), } // 获取上传会话信息 From 7579d44517de5153bf612576be6b9d93dddbac8b Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sat, 15 Mar 2025 00:12:37 +0800 Subject: [PATCH 128/187] fix(onedrive): set req.ContentLength (#8081) * fix(onedrive): set 
req.ContentLength * fix(onedrive_app): set req.ContentLength * fix(cloudreve): set req.ContentLength --- drivers/cloudreve/util.go | 6 ++++-- drivers/onedrive/util.go | 4 ++-- drivers/onedrive_app/util.go | 4 ++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/cloudreve/util.go b/drivers/cloudreve/util.go index 8a90a42f..f41b6b84 100644 --- a/drivers/cloudreve/util.go +++ b/drivers/cloudreve/util.go @@ -208,7 +208,8 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U return err } req = req.WithContext(ctx) - req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) + req.ContentLength = byteSize + // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) req.Header.Set("Authorization", fmt.Sprint(credential)) finish += byteSize res, err := base.HttpClient.Do(req) @@ -247,7 +248,8 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u return err } req = req.WithContext(ctx) - req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) + req.ContentLength = byteSize + // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())) finish += byteSize res, err := base.HttpClient.Do(req) diff --git a/drivers/onedrive/util.go b/drivers/onedrive/util.go index 9350a681..55434967 100644 --- a/drivers/onedrive/util.go +++ b/drivers/onedrive/util.go @@ -8,7 +8,6 @@ import ( "io" "net/http" stdpath "path" - "strconv" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" @@ -226,7 +225,8 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil return err } req = req.WithContext(ctx) - req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) + req.ContentLength = byteSize + // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, 
finish+byteSize-1, stream.GetSize())) finish += byteSize res, err := base.HttpClient.Do(req) diff --git a/drivers/onedrive_app/util.go b/drivers/onedrive_app/util.go index a6793520..1b01324e 100644 --- a/drivers/onedrive_app/util.go +++ b/drivers/onedrive_app/util.go @@ -8,7 +8,6 @@ import ( "io" "net/http" stdpath "path" - "strconv" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" @@ -176,7 +175,8 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model. return err } req = req.WithContext(ctx) - req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) + req.ContentLength = byteSize + // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())) finish += byteSize res, err := base.HttpClient.Do(req) From 0126af4de07d9711e9c2193e7b88583db78cbf46 Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Sat, 15 Mar 2025 00:13:30 +0800 Subject: [PATCH 129/187] fix(crypt): premature close of MFile (#8132 close #8119) * fix(crypt): premature close of MFile * refactor --- drivers/crypt/driver.go | 5 +++-- server/common/proxy.go | 9 +++++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/crypt/driver.go b/drivers/crypt/driver.go index 59b25806..2330fb97 100644 --- a/drivers/crypt/driver.go +++ b/drivers/crypt/driver.go @@ -282,8 +282,9 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( if err != nil { return nil, err } - // 可以直接返回,读取完也不会调用Close,直到连接断开Close - return remoteLink.MFile, nil + //keep reuse same MFile and close at last. 
+ remoteClosers.Add(remoteLink.MFile) + return io.NopCloser(remoteLink.MFile), nil } return nil, errs.NotSupport diff --git a/server/common/proxy.go b/server/common/proxy.go index 8519ed53..23360a34 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -7,6 +7,7 @@ import ( "net/http" "net/url" "os" + "strings" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/net" @@ -102,11 +103,15 @@ func attachHeader(w http.ResponseWriter, file model.Obj) { w.Header().Set("Etag", GetEtag(file)) } func GetEtag(file model.Obj) string { + hash := "" for _, v := range file.GetHash().Export() { - if len(v) != 0 { - return fmt.Sprintf(`"%s"`, v) + if strings.Compare(v, hash) > 0 { + hash = v } } + if len(hash) > 0 { + return fmt.Sprintf(`"%s"`, hash) + } // 参考nginx return fmt.Sprintf(`"%x-%x"`, file.ModTime().Unix(), file.GetSize()) } From 28b61a93fdd6b71553bf32b40107822a72b22d91 Mon Sep 17 00:00:00 2001 From: shniubobo Date: Fri, 14 Mar 2025 16:21:07 +0000 Subject: [PATCH 130/187] feat(webdav): support `oc:checksums` (#8064 close #7472) Ref: #7472 --- pkg/utils/hash.go | 11 +++++++++++ server/webdav/prop.go | 15 ++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/pkg/utils/hash.go b/pkg/utils/hash.go index fa06bcc2..a281dd4e 100644 --- a/pkg/utils/hash.go +++ b/pkg/utils/hash.go @@ -10,6 +10,7 @@ import ( "errors" "hash" "io" + "iter" "github.com/alist-org/alist/v3/internal/errs" log "github.com/sirupsen/logrus" @@ -226,3 +227,13 @@ func (hi HashInfo) GetHash(ht *HashType) string { func (hi HashInfo) Export() map[*HashType]string { return hi.h } + +func (hi HashInfo) All() iter.Seq2[*HashType, string] { + return func(yield func(*HashType, string) bool) { + for hashType, hashValue := range hi.h { + if !yield(hashType, hashValue) { + return + } + } + } +} diff --git a/server/webdav/prop.go b/server/webdav/prop.go index 5e053af4..a81f31b0 100644 --- a/server/webdav/prop.go +++ b/server/webdav/prop.go @@ -9,6 
+9,7 @@ import ( "context" "encoding/xml" "errors" + "fmt" "mime" "net/http" "path" @@ -101,7 +102,7 @@ type DeadPropsHolder interface { Patch([]Proppatch) ([]Propstat, error) } -// liveProps contains all supported, protected DAV: properties. +// liveProps contains all supported properties. var liveProps = map[xml.Name]struct { // findFn implements the propfind function of this property. If nil, // it indicates a hidden property. @@ -160,6 +161,10 @@ var liveProps = map[xml.Name]struct { findFn: findSupportedLock, dir: true, }, + {Space: "http://owncloud.org/ns", Local: "checksums"}: { + findFn: findChecksums, + dir: false, + }, } // TODO(nigeltao) merge props and allprop? @@ -483,3 +488,11 @@ func findSupportedLock(ctx context.Context, ls LockSystem, name string, fi model `` + ``, nil } + +func findChecksums(ctx context.Context, ls LockSystem, name string, fi model.Obj) (string, error) { + checksums := "" + for hashType, hashValue := range fi.GetHash().All() { + checksums += fmt.Sprintf("%s:%s", hashType.Name, hashValue) + } + return checksums, nil +} From 04f5525f207e3f2324df41c7bb17f4d17df4fa5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=8A=98=E7=BA=B8=E9=A3=9E=E6=9C=BA?= Date: Fri, 14 Mar 2025 17:21:24 +0100 Subject: [PATCH 131/187] fix(s3): incorrectly added slash before the Bucket name (#8083 close #8001) --- drivers/s3/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/s3/util.go b/drivers/s3/util.go index 99f271aa..e02945a0 100644 --- a/drivers/s3/util.go +++ b/drivers/s3/util.go @@ -199,7 +199,7 @@ func (d *S3) copyFile(ctx context.Context, src string, dst string) error { dstKey := getKey(dst, false) input := &s3.CopyObjectInput{ Bucket: &d.Bucket, - CopySource: aws.String(url.PathEscape("/" + d.Bucket + "/" + srcKey)), + CopySource: aws.String(url.PathEscape(d.Bucket + "/" + srcKey)), Key: &dstKey, } _, err := d.client.CopyObject(input) From c82e632ee16c150a2844665ddd89defa405d0b29 Mon Sep 17 00:00:00 2001 From: hshpy Date: 
Sat, 15 Mar 2025 23:28:40 +0800 Subject: [PATCH 132/187] fix: potential XSS vulnerabilities (#7923) * fix: potential XSS vulnerabilities * feat: support filter and render for readme.md * chore: set ReadMeAutoRender to true * fix attachFileName undefined --------- Co-authored-by: Andy Hsu --- go.mod | 4 ++ go.sum | 8 ++++ internal/bootstrap/data/setting.go | 5 ++- internal/conf/const.go | 3 +- server/common/proxy.go | 64 ++++++++++++++++++++++++++++++ 5 files changed, 82 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index fad15501..f07db3fe 100644 --- a/go.mod +++ b/go.mod @@ -83,6 +83,7 @@ require ( require ( github.com/STARRY-S/zip v0.2.1 // indirect + github.com/aymerick/douceur v0.2.0 // indirect github.com/blevesearch/go-faiss v1.0.20 // indirect github.com/blevesearch/zapx/v16 v16.1.5 // indirect github.com/bodgit/plumbing v1.3.0 // indirect @@ -97,6 +98,7 @@ require ( github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect github.com/fclairamb/go-log v0.5.0 // indirect + github.com/gorilla/css v1.0.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hekmon/cunits/v2 v2.1.0 // indirect @@ -105,11 +107,13 @@ require ( github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/text v0.2.0 // indirect github.com/matoous/go-nanoid/v2 v2.1.0 // indirect + github.com/microcosm-cc/bluemonday v1.0.27 github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect github.com/sorairolake/lzip-go v0.3.5 // indirect github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/ulikunitz/xz v0.5.12 // indirect + github.com/yuin/goldmark v1.7.8 go4.org v0.0.0-20230225012048-214862532bf5 // indirect ) diff --git a/go.sum b/go.sum index 4237df78..e6a8574b 100644 --- a/go.sum +++ b/go.sum @@ 
-68,6 +68,8 @@ github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiE github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8= github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -303,6 +305,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= +github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -424,6 +428,8 @@ github.com/meilisearch/meilisearch-go v0.27.2 h1:3G21dJ5i208shnLPDsIEZ0L0Geg/5oe github.com/meilisearch/meilisearch-go v0.27.2/go.mod h1:SxuSqDcPBIykjWz1PX+KzsYzArNLSCadQodWs8extS0= github.com/mholt/archives v0.1.0 h1:FacgJyrjiuyomTuNA92X5GyRBRZjE43Y/lrzKIlF35Q= github.com/mholt/archives v0.1.0/go.mod h1:j/Ire/jm42GN7h90F5kzj6hf6ZFzEH66de+hmjEKu+I= 
+github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk= +github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/minio/sio v0.4.0 h1:u4SWVEm5lXSqU42ZWawV0D9I5AZ5YMmo2RXpEQ/kRhc= @@ -613,6 +619,8 @@ github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9/go.mod h1:9BnoKCcgJ/+SLhf github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic= +github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22 h1:X+lHsNTlbatQ1cErXIbtyrh+3MTWxqQFS+sBP/wpFXo= diff --git a/internal/bootstrap/data/setting.go b/internal/bootstrap/data/setting.go index 026a89e1..407a5c64 100644 --- a/internal/bootstrap/data/setting.go +++ b/internal/bootstrap/data/setting.go @@ -1,6 +1,8 @@ package data import ( + "strconv" + "github.com/alist-org/alist/v3/cmd/flags" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/db" @@ -11,7 +13,6 @@ import ( "github.com/alist-org/alist/v3/pkg/utils/random" "github.com/pkg/errors" "gorm.io/gorm" - "strconv" ) var initialSettingItems []model.SettingItem @@ -141,6 +142,8 @@ func InitialSettings() []model.SettingItem { {Key: conf.AudioAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, {Key: 
conf.VideoAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, {Key: conf.PreviewArchivesByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, + {Key: conf.ReadMeAutoRender, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, + {Key: conf.FilterReadMeScripts, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, // global settings {Key: conf.HideFiles, Value: "/\\/README.md/i", Type: conf.TypeText, Group: model.GLOBAL}, {Key: "package_download", Value: "true", Type: conf.TypeBool, Group: model.GLOBAL}, diff --git a/internal/conf/const.go b/internal/conf/const.go index 2234e9bc..5cb8d850 100644 --- a/internal/conf/const.go +++ b/internal/conf/const.go @@ -31,7 +31,8 @@ const ( AudioAutoplay = "audio_autoplay" VideoAutoplay = "video_autoplay" PreviewArchivesByDefault = "preview_archives_by_default" - + ReadMeAutoRender = "readme_autorender" + FilterReadMeScripts = "filter_readme_scripts" // global HideFiles = "hide_files" CustomizeHead = "customize_head" diff --git a/server/common/proxy.go b/server/common/proxy.go index 23360a34..1d61e5fa 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -1,23 +1,87 @@ package common import ( + "bytes" "context" "fmt" "io" "net/http" "net/url" "os" + "strconv" "strings" + "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/net" + "github.com/alist-org/alist/v3/internal/setting" "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" + "github.com/microcosm-cc/bluemonday" log "github.com/sirupsen/logrus" + "github.com/yuin/goldmark" ) +func processMarkdown(content []byte) ([]byte, error) { + var buf bytes.Buffer + if err := goldmark.New().Convert(content, &buf); err != nil { + return nil, fmt.Errorf("markdown conversion failed: %w", err) + } + return bluemonday.UGCPolicy().SanitizeBytes(buf.Bytes()), nil +} + func 
Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error { + + //优先处理md文件 + if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) { + var markdownContent []byte + var err error + + if link.MFile != nil { + defer link.MFile.Close() + attachHeader(w, file) + markdownContent, err = io.ReadAll(link.MFile) + if err != nil { + return fmt.Errorf("failed to read markdown content: %w", err) + } + + } else { + header := net.ProcessHeader(r.Header, link.Header) + res, err := net.RequestHttp(r.Context(), r.Method, header, link.URL) + if err != nil { + return err + } + defer res.Body.Close() + for h, v := range res.Header { + w.Header()[h] = v + } + w.WriteHeader(res.StatusCode) + if r.Method == http.MethodHead { + return nil + } + markdownContent, err = io.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("failed to read markdown content: %w", err) + } + + } + + safeHTML, err := processMarkdown(markdownContent) + if err != nil { + return err + } + + safeHTMLReader := bytes.NewReader(safeHTML) + w.Header().Set("Content-Length", strconv.FormatInt(int64(len(safeHTML)), 10)) + w.Header().Set("Content-Type", "text/html; charset=utf-8") + _, err = utils.CopyWithBuffer(w, safeHTMLReader) + if err != nil { + return err + } + return nil + } + if link.MFile != nil { defer link.MFile.Close() attachHeader(w, file) From d16ba65f4224bb9ace40655c458f6f5e5f9fedad Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Sun, 16 Mar 2025 16:37:33 +0800 Subject: [PATCH 133/187] fix(lang): initialize configuration in LangCmd before generating language JSON file --- cmd/lang.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/lang.go b/cmd/lang.go index 56ef037b..5d8ce837 100644 --- a/cmd/lang.go +++ b/cmd/lang.go @@ -12,6 +12,7 @@ import ( "strings" _ "github.com/alist-org/alist/v3/drivers" + "github.com/alist-org/alist/v3/internal/bootstrap" "github.com/alist-org/alist/v3/internal/bootstrap/data" 
"github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/op" @@ -137,6 +138,7 @@ var LangCmd = &cobra.Command{ Use: "lang", Short: "Generate language json file", Run: func(cmd *cobra.Command, args []string) { + bootstrap.InitConfig() err := os.MkdirAll("lang", 0777) if err != nil { utils.Log.Fatalf("failed create folder: %s", err.Error()) From d20f41d687827c4faab615d05dea4e7938c5be25 Mon Sep 17 00:00:00 2001 From: hshpy Date: Sun, 16 Mar 2025 22:14:44 +0800 Subject: [PATCH 134/187] fix: missing handling of RangeReadCloser (#8146) --- server/common/proxy.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/server/common/proxy.go b/server/common/proxy.go index 1d61e5fa..00fee4b2 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -45,7 +45,17 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. if err != nil { return fmt.Errorf("failed to read markdown content: %w", err) } - + } else if link.RangeReadCloser != nil { + attachHeader(w, file) + rrc, err := link.RangeReadCloser.RangeRead(r.Context(), http_range.Range{Start: 0, Length: -1}) + if err != nil { + return err + } + defer rrc.Close() + markdownContent, err = io.ReadAll(rrc) + if err != nil { + return fmt.Errorf("failed to read markdown content: %w", err) + } } else { header := net.ProcessHeader(r.Header, link.Header) res, err := net.RequestHttp(r.Context(), r.Method, header, link.URL) From 3499c4db8712564c9bdbd8c97d3fb7a7739d42a2 Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Mon, 17 Mar 2025 00:52:09 +0800 Subject: [PATCH 135/187] feat: 115 open driver (#8139) * wip: 115 open * chore(go.mod): update 115-sdk-go dependency version * feat(115_open): implement directory management and file operations * chore(go.mod): update 115-sdk-go dependency to v0.1.1 and adjust callback handling in driver * chore: rename driver --- drivers/115_open/driver.go | 308 +++++++++++++++++++++++++++++++++++++ 
drivers/115_open/meta.go | 36 +++++ drivers/115_open/types.go | 59 +++++++ drivers/115_open/util.go | 3 + drivers/all.go | 1 + go.mod | 24 +-- go.sum | 28 ++-- 7 files changed, 436 insertions(+), 23 deletions(-) create mode 100644 drivers/115_open/driver.go create mode 100644 drivers/115_open/meta.go create mode 100644 drivers/115_open/types.go create mode 100644 drivers/115_open/util.go diff --git a/drivers/115_open/driver.go b/drivers/115_open/driver.go new file mode 100644 index 00000000..67c17608 --- /dev/null +++ b/drivers/115_open/driver.go @@ -0,0 +1,308 @@ +package _115_open + +import ( + "context" + "encoding/base64" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + "github.com/alist-org/alist/v3/cmd/flags" + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/aliyun/aliyun-oss-go-sdk/oss" + sdk "github.com/xhofe/115-sdk-go" +) + +type Open115 struct { + model.Storage + Addition + client *sdk.Client +} + +func (d *Open115) Config() driver.Config { + return config +} + +func (d *Open115) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *Open115) Init(ctx context.Context) error { + d.client = sdk.New(sdk.WithRefreshToken(d.Addition.RefreshToken), + sdk.WithAccessToken(d.Addition.AccessToken), + sdk.WithOnRefreshToken(func(s1, s2 string) { + d.Addition.AccessToken = s1 + d.Addition.RefreshToken = s2 + op.MustSaveDriverStorage(d) + })) + if flags.Debug || flags.Dev { + d.client.SetDebug(true) + } + _, err := d.client.UserInfo(ctx) + if err != nil { + return err + } + return nil +} + +func (d *Open115) Drop(ctx context.Context) error { + return nil +} + +func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + var res []model.Obj + pageSize := int64(200) + offset := int64(0) + for { + 
resp, err := d.client.GetFiles(ctx, &sdk.GetFilesReq{ + CID: dir.GetID(), + Limit: pageSize, + Offset: offset, + ASC: d.Addition.OrderDirection == "asc", + O: d.Addition.OrderBy, + // Cur: 1, + ShowDir: true, + }) + if err != nil { + return nil, err + } + res = append(res, utils.MustSliceConvert(resp.Data, func(src sdk.GetFilesResp_File) model.Obj { + obj := Obj(src) + return &obj + })...) + if len(res) >= int(resp.Count) { + break + } + offset += pageSize + } + return res, nil +} + +func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + var ua string + if args.Header != nil { + ua = args.Header.Get("User-Agent") + } + if ua == "" { + ua = base.UserAgent + } + obj, ok := file.(*Obj) + if !ok { + return nil, fmt.Errorf("can't convert obj") + } + pc := obj.Pc + resp, err := d.client.DownURL(ctx, pc, ua) + if err != nil { + return nil, err + } + u, ok := resp[obj.GetID()] + if !ok { + return nil, fmt.Errorf("can't get link") + } + return &model.Link{ + URL: u.URL.URL, + Header: http.Header{ + "User-Agent": []string{ua}, + }, + }, nil +} + +func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + resp, err := d.client.Mkdir(ctx, parentDir.GetID(), dirName) + if err != nil { + return nil, err + } + return &Obj{ + Fid: resp.FileID, + Pid: parentDir.GetID(), + Fn: dirName, + Fc: "0", + Upt: time.Now().Unix(), + Uet: time.Now().Unix(), + UpPt: time.Now().Unix(), + }, nil +} + +func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + _, err := d.client.Move(ctx, &sdk.MoveReq{ + FileIDs: srcObj.GetID(), + ToCid: dstDir.GetID(), + }) + if err != nil { + return nil, err + } + return srcObj, nil +} + +func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + _, err := d.client.UpdateFile(ctx, &sdk.UpdateFileReq{ + FileID: srcObj.GetID(), + FileNma: newName, + }) + if err != nil { + return nil, err + 
} + return srcObj, nil +} + +func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + _, err := d.client.Copy(ctx, &sdk.CopyReq{ + PID: dstDir.GetID(), + FileID: srcObj.GetID(), + NoDupli: "1", + }) + if err != nil { + return nil, err + } + return srcObj, nil +} + +func (d *Open115) Remove(ctx context.Context, obj model.Obj) error { + _obj, ok := obj.(*Obj) + if !ok { + return fmt.Errorf("can't convert obj") + } + _, err := d.client.DelFile(ctx, &sdk.DelFileReq{ + FileIDs: _obj.GetID(), + ParentID: _obj.Pid, + }) + if err != nil { + return err + } + return nil +} + +func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + tempF, err := file.CacheFullInTempFile() + if err != nil { + return err + } + // cal full sha1 + sha1, err := utils.HashReader(utils.SHA1, tempF) + if err != nil { + return err + } + _, err = tempF.Seek(0, io.SeekStart) + if err != nil { + return err + } + // pre 128k sha1 + sha1128k, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, 128*1024)) + if err != nil { + return err + } + _, err = tempF.Seek(0, io.SeekStart) + if err != nil { + return err + } + // 1. Init + resp, err := d.client.UploadInit(ctx, &sdk.UploadInitReq{ + FileName: file.GetName(), + FileSize: file.GetSize(), + Target: dstDir.GetID(), + FileID: strings.ToUpper(sha1), + PreID: strings.ToUpper(sha1128k), + }) + if err != nil { + return err + } + if resp.Status == 2 { + return nil + } + // 2. 
two way verify + if utils.SliceContains([]int{6, 7, 8}, resp.Status) { + signCheck := strings.Split(resp.SignCheck, "-") //"sign_check": "2392148-2392298" 取2392148-2392298之间的内容(包含2392148、2392298)的sha1 + start, err := strconv.ParseInt(signCheck[0], 10, 64) + if err != nil { + return err + } + end, err := strconv.ParseInt(signCheck[1], 10, 64) + if err != nil { + return err + } + _, err = tempF.Seek(start, io.SeekStart) + if err != nil { + return err + } + signVal, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, end-start+1)) + if err != nil { + return err + } + _, err = tempF.Seek(0, io.SeekStart) + if err != nil { + return err + } + resp, err = d.client.UploadInit(ctx, &sdk.UploadInitReq{ + FileName: file.GetName(), + FileSize: file.GetSize(), + Target: dstDir.GetID(), + FileID: strings.ToUpper(sha1), + PreID: strings.ToUpper(sha1128k), + SignKey: resp.SignKey, + SignVal: strings.ToUpper(signVal), + }) + if err != nil { + return err + } + if resp.Status == 2 { + return nil + } + } + // 3. get upload token + tokenResp, err := d.client.UploadGetToken(ctx) + if err != nil { + return err + } + // 4. 
upload + ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken)) + if err != nil { + return err + } + bucket, err := ossClient.Bucket(resp.Bucket) + if err != nil { + return err + } + err = bucket.PutObject(resp.Object, tempF, + oss.Callback(base64.StdEncoding.EncodeToString([]byte(resp.Callback.Value.Callback))), + oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(resp.Callback.Value.CallbackVar))), + ) + if err != nil { + return err + } + return nil +} + +// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { +// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional +// return nil, errs.NotImplement +// } + +// func (d *Open115) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { +// // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional +// return nil, errs.NotImplement +// } + +// func (d *Open115) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { +// // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional +// return nil, errs.NotImplement +// } + +// func (d *Open115) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) { +// // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional +// // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir +// // return errs.NotImplement to use an internal archive tool +// return nil, errs.NotImplement +// } + +//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { +// return nil, 
errs.NotSupport +//} + +var _ driver.Driver = (*Open115)(nil) diff --git a/drivers/115_open/meta.go b/drivers/115_open/meta.go new file mode 100644 index 00000000..7e26e0dd --- /dev/null +++ b/drivers/115_open/meta.go @@ -0,0 +1,36 @@ +package _115_open + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + // Usually one of two + driver.RootID + // define other + RefreshToken string `json:"refresh_token" required:"true"` + OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"` + OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"` + AccessToken string +} + +var config = driver.Config{ + Name: "115 Open", + LocalSort: false, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: false, + NeedMs: false, + DefaultRoot: "0", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &Open115{} + }) +} diff --git a/drivers/115_open/types.go b/drivers/115_open/types.go new file mode 100644 index 00000000..491a368e --- /dev/null +++ b/drivers/115_open/types.go @@ -0,0 +1,59 @@ +package _115_open + +import ( + "time" + + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + sdk "github.com/xhofe/115-sdk-go" +) + +type Obj sdk.GetFilesResp_File + +// Thumb implements model.Thumb. +func (o *Obj) Thumb() string { + return o.Thumbnail +} + +// CreateTime implements model.Obj. +func (o *Obj) CreateTime() time.Time { + return time.Unix(o.UpPt, 0) +} + +// GetHash implements model.Obj. +func (o *Obj) GetHash() utils.HashInfo { + return utils.NewHashInfo(utils.SHA1, o.Sha1) +} + +// GetID implements model.Obj. +func (o *Obj) GetID() string { + return o.Fid +} + +// GetName implements model.Obj. +func (o *Obj) GetName() string { + return o.Fn +} + +// GetPath implements model.Obj. 
+func (o *Obj) GetPath() string { + return "" +} + +// GetSize implements model.Obj. +func (o *Obj) GetSize() int64 { + return o.FS +} + +// IsDir implements model.Obj. +func (o *Obj) IsDir() bool { + return o.Fc == "0" +} + +// ModTime implements model.Obj. +func (o *Obj) ModTime() time.Time { + return time.Unix(o.Upt, 0) +} + +var _ model.Obj = (*Obj)(nil) +var _ model.Thumb = (*Obj)(nil) diff --git a/drivers/115_open/util.go b/drivers/115_open/util.go new file mode 100644 index 00000000..ee021659 --- /dev/null +++ b/drivers/115_open/util.go @@ -0,0 +1,3 @@ +package _115_open + +// do others that not defined in Driver interface diff --git a/drivers/all.go b/drivers/all.go index 2746e1bf..963f0c44 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -2,6 +2,7 @@ package drivers import ( _ "github.com/alist-org/alist/v3/drivers/115" + _ "github.com/alist-org/alist/v3/drivers/115_open" _ "github.com/alist-org/alist/v3/drivers/115_share" _ "github.com/alist-org/alist/v3/drivers/123" _ "github.com/alist-org/alist/v3/drivers/123_link" diff --git a/go.mod b/go.mod index f07db3fe..557c16c2 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module github.com/alist-org/alist/v3 -go 1.23 - -toolchain go1.23.1 +go 1.23.4 require ( github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 @@ -67,10 +65,10 @@ require ( github.com/xhofe/wopan-sdk-go v0.1.3 github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9 github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22 - golang.org/x/crypto v0.31.0 + golang.org/x/crypto v0.36.0 golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e golang.org/x/image v0.19.0 - golang.org/x/net v0.28.0 + golang.org/x/net v0.37.0 golang.org/x/oauth2 v0.22.0 golang.org/x/time v0.8.0 google.golang.org/appengine v1.6.8 @@ -107,14 +105,16 @@ require ( github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/text v0.2.0 // indirect github.com/matoous/go-nanoid/v2 v2.1.0 // indirect - github.com/microcosm-cc/bluemonday v1.0.27 + 
github.com/microcosm-cc/bluemonday v1.0.27 github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect github.com/sorairolake/lzip-go v0.3.5 // indirect github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/ulikunitz/xz v0.5.12 // indirect - github.com/yuin/goldmark v1.7.8 + github.com/xhofe/115-sdk-go v0.1.1 + github.com/yuin/goldmark v1.7.8 go4.org v0.0.0-20230225012048-214862532bf5 // indirect + resty.dev/v3 v3.0.0-beta.2 // indirect ) require ( @@ -246,10 +246,10 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect go.etcd.io/bbolt v1.3.8 // indirect golang.org/x/arch v0.8.0 // indirect - golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 // indirect - golang.org/x/term v0.27.0 // indirect - golang.org/x/text v0.21.0 + golang.org/x/sync v0.12.0 + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 golang.org/x/tools v0.24.0 // indirect google.golang.org/api v0.169.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect @@ -261,3 +261,5 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.1.7 // indirect ) + +// replace github.com/xhofe/115-sdk-go => ../../xhofe/115-sdk-go diff --git a/go.sum b/go.sum index e6a8574b..1b5f46f2 100644 --- a/go.sum +++ b/go.sum @@ -606,6 +606,8 @@ github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXo github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xhofe/115-sdk-go v0.1.1 h1:eMQIuCyhWZHQApqdCIt7bTA3S5MYQnANeLJbWYSDv6A= +github.com/xhofe/115-sdk-go v0.1.1/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U= 
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0= github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM= @@ -663,8 +665,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -731,8 +733,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -752,8 +754,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -793,8 +795,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod 
h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -807,8 +809,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -825,8 +827,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= @@ -953,6 +955,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= +resty.dev/v3 v3.0.0-beta.2 h1:xu4mGAdbCLuc3kbk7eddWfWm4JfhwDtdapwss5nCjnQ= +resty.dev/v3 v3.0.0-beta.2/go.mod h1:OgkqiPvTDtOuV4MGZuUDhwOpkY8enjOsjjMzeOHefy4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= From b4e6ab12d9b093ab328f45879cae298b89278d93 Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Tue, 18 Mar 2025 22:02:33 +0800 Subject: [PATCH 136/187] refactor: FilterReadMeScripts (#8154 close #8150) * refactor: FilterReadMeScripts * . 
--- drivers/quqi/driver.go | 4 +- drivers/quqi/util.go | 11 ------ drivers/vtencent/util.go | 4 +- internal/net/request.go | 5 +-- pkg/utils/path.go | 2 +- server/common/proxy.go | 80 ++-------------------------------------- server/handles/down.go | 43 ++++++++++++++++++++- 7 files changed, 51 insertions(+), 98 deletions(-) diff --git a/drivers/quqi/driver.go b/drivers/quqi/driver.go index 0fa64041..36758bd1 100644 --- a/drivers/quqi/driver.go +++ b/drivers/quqi/driver.go @@ -316,7 +316,7 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea // if the file already exists in Quqi server, there is no need to actually upload it if uploadInitResp.Data.Exist { // the file name returned by Quqi does not include the extension name - nodeName, nodeExt := uploadInitResp.Data.NodeName, rawExt(stream.GetName()) + nodeName, nodeExt := uploadInitResp.Data.NodeName, utils.Ext(stream.GetName()) if nodeExt != "" { nodeName = nodeName + "." + nodeExt } @@ -432,7 +432,7 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea return nil, err } // the file name returned by Quqi does not include the extension name - nodeName, nodeExt := uploadFinishResp.Data.NodeName, rawExt(stream.GetName()) + nodeName, nodeExt := uploadFinishResp.Data.NodeName, utils.Ext(stream.GetName()) if nodeExt != "" { nodeName = nodeName + "." 
+ nodeExt } diff --git a/drivers/quqi/util.go b/drivers/quqi/util.go index 5ad43c4b..aa184d70 100644 --- a/drivers/quqi/util.go +++ b/drivers/quqi/util.go @@ -9,7 +9,6 @@ import ( "io" "net/http" "net/url" - stdpath "path" "strings" "time" @@ -115,16 +114,6 @@ func (d *Quqi) checkLogin() bool { return true } -// rawExt 保留扩展名大小写 -func rawExt(name string) string { - ext := stdpath.Ext(name) - if strings.HasPrefix(ext, ".") { - ext = ext[1:] - } - - return ext -} - // decryptKey 获取密码 func decryptKey(encodeKey string) []byte { // 移除非法字符 diff --git a/drivers/vtencent/util.go b/drivers/vtencent/util.go index 91db54b7..4ba72d1b 100644 --- a/drivers/vtencent/util.go +++ b/drivers/vtencent/util.go @@ -8,9 +8,7 @@ import ( "fmt" "io" "net/http" - "path" "strconv" - "strings" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" @@ -151,7 +149,7 @@ func (d *Vtencent) ApplyUploadUGC(signature string, stream model.FileStreamer) ( form := base.Json{ "signature": signature, "videoName": stream.GetName(), - "videoType": strings.ReplaceAll(path.Ext(stream.GetName()), ".", ""), + "videoType": utils.Ext(stream.GetName()), "videoSize": stream.GetSize(), } var resps RspApplyUploadUGC diff --git a/internal/net/request.go b/internal/net/request.go index c9ef363f..d4f9321c 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -619,10 +619,9 @@ type Buf struct { // NewBuf is a buffer that can have 1 read & 1 write at the same time. 
// when read is faster write, immediately feed data to read after written func NewBuf(ctx context.Context, maxSize int) *Buf { - d := make([]byte, 0, maxSize) return &Buf{ ctx: ctx, - buffer: bytes.NewBuffer(d), + buffer: bytes.NewBuffer(make([]byte, 0, maxSize)), size: maxSize, } } @@ -677,5 +676,5 @@ func (br *Buf) Write(p []byte) (n int, err error) { } func (br *Buf) Close() { - br.buffer.Reset() + br.buffer = nil } diff --git a/pkg/utils/path.go b/pkg/utils/path.go index c0793a3e..135f8e4e 100644 --- a/pkg/utils/path.go +++ b/pkg/utils/path.go @@ -45,7 +45,7 @@ func IsSubPath(path string, subPath string) bool { func Ext(path string) string { ext := stdpath.Ext(path) - if strings.HasPrefix(ext, ".") { + if len(ext) > 0 && ext[0] == '.' { ext = ext[1:] } return strings.ToLower(ext) diff --git a/server/common/proxy.go b/server/common/proxy.go index 00fee4b2..c14af6fa 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -1,97 +1,25 @@ package common import ( - "bytes" "context" "fmt" "io" "net/http" "net/url" "os" - "strconv" "strings" - "github.com/alist-org/alist/v3/internal/conf" + "maps" + "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/net" - "github.com/alist-org/alist/v3/internal/setting" "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" - "github.com/microcosm-cc/bluemonday" log "github.com/sirupsen/logrus" - "github.com/yuin/goldmark" ) -func processMarkdown(content []byte) ([]byte, error) { - var buf bytes.Buffer - if err := goldmark.New().Convert(content, &buf); err != nil { - return nil, fmt.Errorf("markdown conversion failed: %w", err) - } - return bluemonday.UGCPolicy().SanitizeBytes(buf.Bytes()), nil -} - func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error { - - //优先处理md文件 - if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) { - var 
markdownContent []byte - var err error - - if link.MFile != nil { - defer link.MFile.Close() - attachHeader(w, file) - markdownContent, err = io.ReadAll(link.MFile) - if err != nil { - return fmt.Errorf("failed to read markdown content: %w", err) - } - } else if link.RangeReadCloser != nil { - attachHeader(w, file) - rrc, err := link.RangeReadCloser.RangeRead(r.Context(), http_range.Range{Start: 0, Length: -1}) - if err != nil { - return err - } - defer rrc.Close() - markdownContent, err = io.ReadAll(rrc) - if err != nil { - return fmt.Errorf("failed to read markdown content: %w", err) - } - } else { - header := net.ProcessHeader(r.Header, link.Header) - res, err := net.RequestHttp(r.Context(), r.Method, header, link.URL) - if err != nil { - return err - } - defer res.Body.Close() - for h, v := range res.Header { - w.Header()[h] = v - } - w.WriteHeader(res.StatusCode) - if r.Method == http.MethodHead { - return nil - } - markdownContent, err = io.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("failed to read markdown content: %w", err) - } - - } - - safeHTML, err := processMarkdown(markdownContent) - if err != nil { - return err - } - - safeHTMLReader := bytes.NewReader(safeHTML) - w.Header().Set("Content-Length", strconv.FormatInt(int64(len(safeHTML)), 10)) - w.Header().Set("Content-Type", "text/html; charset=utf-8") - _, err = utils.CopyWithBuffer(w, safeHTMLReader) - if err != nil { - return err - } - return nil - } - if link.MFile != nil { defer link.MFile.Close() attachHeader(w, file) @@ -152,9 +80,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. 
} defer res.Body.Close() - for h, v := range res.Header { - w.Header()[h] = v - } + maps.Copy(w.Header(), res.Header) w.WriteHeader(res.StatusCode) if r.Method == http.MethodHead { return nil diff --git a/server/handles/down.go b/server/handles/down.go index b2f9a21b..1153881f 100644 --- a/server/handles/down.go +++ b/server/handles/down.go @@ -1,9 +1,12 @@ package handles import ( + "bytes" "fmt" "io" + "net/http" stdpath "path" + "strconv" "strings" "github.com/alist-org/alist/v3/internal/conf" @@ -15,7 +18,9 @@ import ( "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" "github.com/gin-gonic/gin" + "github.com/microcosm-cc/bluemonday" log "github.com/sirupsen/logrus" + "github.com/yuin/goldmark" ) func Down(c *gin.Context) { @@ -124,7 +129,34 @@ func localProxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange boo if proxyRange { common.ProxyRange(link, file.GetSize()) } - err = common.Proxy(c.Writer, c.Request, link, file) + + //优先处理md文件 + if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) { + w := c.Writer + buf := bytes.NewBuffer(make([]byte, 0, file.GetSize())) + err = common.Proxy(&proxyResponseWriter{ResponseWriter: w, Writer: buf}, c.Request, link, file) + if err == nil && buf.Len() > 0 { + if w.Status() < 200 || w.Status() > 300 { + w.Write(buf.Bytes()) + return + } + + var html bytes.Buffer + if err = goldmark.Convert(buf.Bytes(), &html); err != nil { + err = fmt.Errorf("markdown conversion failed: %w", err) + } else { + buf.Reset() + err = bluemonday.UGCPolicy().SanitizeReaderToWriter(&html, buf) + if err == nil { + w.Header().Set("Content-Length", strconv.FormatInt(int64(buf.Len()), 10)) + w.Header().Set("Content-Type", "text/html; charset=utf-8") + _, err = utils.CopyWithBuffer(c.Writer, buf) + } + } + } + } else { + err = common.Proxy(c.Writer, c.Request, link, file) + } if err != nil { common.ErrorResp(c, err, 500, true) return @@ -150,3 +182,12 @@ func canProxy(storage 
driver.Driver, filename string) bool { } return false } + +type proxyResponseWriter struct { + http.ResponseWriter + io.Writer +} + +func (pw *proxyResponseWriter) Write(p []byte) (int, error) { + return pw.Writer.Write(p) +} From 35d6f3b8fc818a6ad5c9c39088170c37619327b0 Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Tue, 18 Mar 2025 22:21:50 +0800 Subject: [PATCH 137/187] fix(115_open): upgrade sdk (close #8151) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 557c16c2..b506254c 100644 --- a/go.mod +++ b/go.mod @@ -111,7 +111,7 @@ require ( github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/ulikunitz/xz v0.5.12 // indirect - github.com/xhofe/115-sdk-go v0.1.1 + github.com/xhofe/115-sdk-go v0.1.2 github.com/yuin/goldmark v1.7.8 go4.org v0.0.0-20230225012048-214862532bf5 // indirect resty.dev/v3 v3.0.0-beta.2 // indirect diff --git a/go.sum b/go.sum index 1b5f46f2..245010d4 100644 --- a/go.sum +++ b/go.sum @@ -606,8 +606,8 @@ github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXo github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xhofe/115-sdk-go v0.1.1 h1:eMQIuCyhWZHQApqdCIt7bTA3S5MYQnANeLJbWYSDv6A= -github.com/xhofe/115-sdk-go v0.1.1/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U= +github.com/xhofe/115-sdk-go v0.1.2 h1:Y58Zg+pz9D5FDCgwdg7T/F+6/t07/F1Ni/5bRa7yJNA= +github.com/xhofe/115-sdk-go v0.1.2/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod 
h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0= github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM= From 4563aea47e5039248b80854420f90bfe83da1d2d Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Tue, 18 Mar 2025 22:25:04 +0800 Subject: [PATCH 138/187] fix(115_open): rename delay to take effect (close #8156) --- drivers/115_open/driver.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/115_open/driver.go b/drivers/115_open/driver.go index 67c17608..00337c0b 100644 --- a/drivers/115_open/driver.go +++ b/drivers/115_open/driver.go @@ -149,6 +149,10 @@ func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) if err != nil { return nil, err } + obj, ok := srcObj.(*Obj) + if ok { + obj.Fn = newName + } return srcObj, nil } From 758554a40fe4f5a3b91b6b89cdad5150ffe44d65 Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Wed, 19 Mar 2025 21:47:42 +0800 Subject: [PATCH 139/187] fix(115_open): upgrade 115-sdk-go dependency to v0.1.3 (close #8169) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b506254c..bdbb1c8e 100644 --- a/go.mod +++ b/go.mod @@ -111,7 +111,7 @@ require ( github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/ulikunitz/xz v0.5.12 // indirect - github.com/xhofe/115-sdk-go v0.1.2 + github.com/xhofe/115-sdk-go v0.1.3 github.com/yuin/goldmark v1.7.8 go4.org v0.0.0-20230225012048-214862532bf5 // indirect resty.dev/v3 v3.0.0-beta.2 // indirect diff --git a/go.sum b/go.sum index 245010d4..2fbc48da 100644 --- a/go.sum +++ b/go.sum @@ -606,8 +606,8 @@ github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXo github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod 
h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xhofe/115-sdk-go v0.1.2 h1:Y58Zg+pz9D5FDCgwdg7T/F+6/t07/F1Ni/5bRa7yJNA= -github.com/xhofe/115-sdk-go v0.1.2/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U= +github.com/xhofe/115-sdk-go v0.1.3 h1:n/00JkEwNOZUb7+U8BgrftotMbPf8yQKpm1bYc+WBoE= +github.com/xhofe/115-sdk-go v0.1.3/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0= github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM= From 32890da29f72e380ffd6e64df4a693e5cd4f3baa Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Fri, 21 Mar 2025 19:06:09 +0800 Subject: [PATCH 140/187] fix(115_open): upgrade 115-sdk-go dependency to v0.1.4 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index bdbb1c8e..a06c62ba 100644 --- a/go.mod +++ b/go.mod @@ -111,7 +111,7 @@ require ( github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/ulikunitz/xz v0.5.12 // indirect - github.com/xhofe/115-sdk-go v0.1.3 + github.com/xhofe/115-sdk-go v0.1.4 github.com/yuin/goldmark v1.7.8 go4.org v0.0.0-20230225012048-214862532bf5 // indirect resty.dev/v3 v3.0.0-beta.2 // indirect diff --git a/go.sum b/go.sum index 2fbc48da..bf98a8cd 100644 --- a/go.sum +++ b/go.sum @@ -606,8 +606,8 @@ github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXo github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xhofe/115-sdk-go v0.1.3 
h1:n/00JkEwNOZUb7+U8BgrftotMbPf8yQKpm1bYc+WBoE= -github.com/xhofe/115-sdk-go v0.1.3/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U= +github.com/xhofe/115-sdk-go v0.1.4 h1:erIWuWH+kZQOEHM+YZK8Y6sWQ2s/SFJIFh/WeCtjiiY= +github.com/xhofe/115-sdk-go v0.1.4/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0= github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM= From 6e13923225afdc6c95b09996636919b67c96d62b Mon Sep 17 00:00:00 2001 From: KirCute <951206789@qq.com> Date: Thu, 27 Mar 2025 23:14:36 +0800 Subject: [PATCH 141/187] fix(sftp-server): postgre cannot store control characters (#8188 close #8186) --- internal/op/sshkey.go | 1 - server/handles/sshkey.go | 3 ++- server/sftp.go | 12 ++++++++---- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/internal/op/sshkey.go b/internal/op/sshkey.go index 6ed55658..139698e6 100644 --- a/internal/op/sshkey.go +++ b/internal/op/sshkey.go @@ -17,7 +17,6 @@ func CreateSSHPublicKey(k *model.SSHPublicKey) (error, bool) { if err != nil { return err, false } - k.KeyStr = string(pubKey.Marshal()) k.Fingerprint = ssh.FingerprintSHA256(pubKey) k.AddedTime = time.Now() k.LastUsedTime = k.AddedTime diff --git a/server/handles/sshkey.go b/server/handles/sshkey.go index c53b46f2..6f8d46b4 100644 --- a/server/handles/sshkey.go +++ b/server/handles/sshkey.go @@ -6,6 +6,7 @@ import ( "github.com/alist-org/alist/v3/server/common" "github.com/gin-gonic/gin" "strconv" + "strings" ) type SSHKeyAddReq struct { @@ -30,7 +31,7 @@ func AddMyPublicKey(c *gin.Context) { } key := &model.SSHPublicKey{ Title: req.Title, - KeyStr: req.Key, + KeyStr: strings.TrimSpace(req.Key), UserId: userObj.ID, } err, parsed := op.CreateSSHPublicKey(key) diff --git a/server/sftp.go b/server/sftp.go index 
0455c962..42c676e8 100644 --- a/server/sftp.go +++ b/server/sftp.go @@ -113,11 +113,15 @@ func (d *SftpDriver) PublicKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s } marshal := string(key.Marshal()) for _, sk := range keys { - if marshal == sk.KeyStr { - sk.LastUsedTime = time.Now() - _ = op.UpdateSSHPublicKey(&sk) - return nil, nil + if marshal != sk.KeyStr { + pubKey, _, _, _, e := ssh.ParseAuthorizedKey([]byte(sk.KeyStr)) + if e != nil || marshal != string(pubKey.Marshal()) { + continue + } } + sk.LastUsedTime = time.Now() + _ = op.UpdateSSHPublicKey(&sk) + return nil, nil } return nil, errors.New("public key refused") } From 10a76c701dc5fd1a2557f6c2367b88b490aa4a33 Mon Sep 17 00:00:00 2001 From: Ljcbaby <46277145+ljcbaby@users.noreply.github.com> Date: Thu, 27 Mar 2025 23:15:04 +0800 Subject: [PATCH 142/187] fix(db): support postgres trust/peer mode (#8198 close #8066) --- internal/bootstrap/db.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/internal/bootstrap/db.go b/internal/bootstrap/db.go index 39b659b7..5f5f6fce 100644 --- a/internal/bootstrap/db.go +++ b/internal/bootstrap/db.go @@ -68,8 +68,13 @@ func InitDB() { { dsn := database.DSN if dsn == "" { - dsn = fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai", - database.Host, database.User, database.Password, database.Name, database.Port, database.SSLMode) + if database.Password != "" { + dsn = fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai", + database.Host, database.User, database.Password, database.Name, database.Port, database.SSLMode) + } else { + dsn = fmt.Sprintf("host=%s user=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai", + database.Host, database.User, database.Name, database.Port, database.SSLMode) + } } dB, err = gorm.Open(postgres.Open(dsn), gormConfig) } From 4fcc3a187e19de022116a41d884b02e74d2da70e Mon Sep 17 00:00:00 2001 From: KirCute <951206789@qq.com> 
Date: Thu, 27 Mar 2025 23:15:47 +0800 Subject: [PATCH 143/187] fix(traffic): duplicate semaphore release when uploading (#8211 close #8180) --- drivers/189pc/utils.go | 6 +++--- drivers/baidu_netdisk/driver.go | 6 +++--- drivers/baidu_photo/driver.go | 6 +++--- drivers/mopan/driver.go | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/189pc/utils.go b/drivers/189pc/utils.go index 290d2e56..fb1a183a 100644 --- a/drivers/189pc/utils.go +++ b/drivers/189pc/utils.go @@ -520,9 +520,6 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo if utils.IsCanceled(upCtx) { break } - if err = sem.Acquire(ctx, 1); err != nil { - break - } byteData := make([]byte, sliceSize) if i == count { byteData = byteData[:lastPartSize] @@ -541,6 +538,9 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes)) threadG.Go(func(ctx context.Context) error { + if err = sem.Acquire(ctx, 1); err != nil { + return err + } defer sem.Release(1) uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo) if err != nil { diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 264f3b02..6ea62197 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -266,15 +266,15 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F if utils.IsCanceled(upCtx) { break } - if err = sem.Acquire(ctx, 1); err != nil { - break - } i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize if partseq+1 == count { byteSize = lastBlockSize } threadG.Go(func(ctx context.Context) error { + if err = sem.Acquire(ctx, 1); err != nil { + return err + } defer sem.Release(1) params := map[string]string{ "method": "upload", diff --git a/drivers/baidu_photo/driver.go b/drivers/baidu_photo/driver.go index 9ee0a7ae..eeee746f 
100644 --- a/drivers/baidu_photo/driver.go +++ b/drivers/baidu_photo/driver.go @@ -321,9 +321,6 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil if utils.IsCanceled(upCtx) { break } - if err = sem.Acquire(ctx, 1); err != nil { - break - } i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT if partseq+1 == count { @@ -331,6 +328,9 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil } threadG.Go(func(ctx context.Context) error { + if err = sem.Acquire(ctx, 1); err != nil { + return err + } defer sem.Release(1) uploadParams := map[string]string{ "method": "upload", diff --git a/drivers/mopan/driver.go b/drivers/mopan/driver.go index 2cbabe46..736d612a 100644 --- a/drivers/mopan/driver.go +++ b/drivers/mopan/driver.go @@ -315,9 +315,6 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre if utils.IsCanceled(upCtx) { break } - if err = sem.Acquire(ctx, 1); err != nil { - break - } i, part, byteSize := i, part, initUpdload.PartSize if part.PartNumber == uploadPartData.PartTotal { byteSize = initUpdload.LastPartSize @@ -325,6 +322,9 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre // step.4 threadG.Go(func(ctx context.Context) error { + if err = sem.Acquire(ctx, 1); err != nil { + return err + } defer sem.Release(1) reader := io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize) req, err := part.NewRequest(ctx, driver.NewLimitedUploadStream(ctx, reader)) From 9a9aee9ac6e968fe18645b14d69830eb00e33182 Mon Sep 17 00:00:00 2001 From: KirCute <951206789@qq.com> Date: Thu, 27 Mar 2025 23:17:45 +0800 Subject: [PATCH 144/187] feat(alias): support writing to non-ambiguous paths (#8216) * feat(alias): support writing to non-ambiguous paths * feat(alias): support extract concurrency * fix(alias): extract url no pass query --- drivers/alias/driver.go | 174 +++++++++++++++++++++++++- 
drivers/alias/meta.go | 3 +- drivers/alias/util.go | 71 ++++++++++- internal/fs/fs.go | 23 +++- internal/offline_download/tool/add.go | 41 +++--- internal/op/archive.go | 2 +- 6 files changed, 285 insertions(+), 29 deletions(-) diff --git a/drivers/alias/driver.go b/drivers/alias/driver.go index 16215c8e..e292a628 100644 --- a/drivers/alias/driver.go +++ b/drivers/alias/driver.go @@ -3,6 +3,7 @@ package alias import ( "context" "errors" + stdpath "path" "strings" "github.com/alist-org/alist/v3/internal/driver" @@ -126,8 +127,46 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( return nil, errs.ObjectNotFound } +func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + if !d.Writable { + return errs.PermissionDenied + } + reqPath, err := d.getReqPath(ctx, parentDir, true) + if err == nil { + return fs.MakeDir(ctx, stdpath.Join(*reqPath, dirName)) + } + if errs.IsNotImplement(err) { + return errors.New("same-name dirs cannot make sub-dir") + } + return err +} + +func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + if !d.Writable { + return errs.PermissionDenied + } + srcPath, err := d.getReqPath(ctx, srcObj, false) + if errs.IsNotImplement(err) { + return errors.New("same-name files cannot be moved") + } + if err != nil { + return err + } + dstPath, err := d.getReqPath(ctx, dstDir, true) + if errs.IsNotImplement(err) { + return errors.New("same-name dirs cannot be moved to") + } + if err != nil { + return err + } + return fs.Move(ctx, *srcPath, *dstPath) +} + func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) error { - reqPath, err := d.getReqPath(ctx, srcObj) + if !d.Writable { + return errs.PermissionDenied + } + reqPath, err := d.getReqPath(ctx, srcObj, false) if err == nil { return fs.Rename(ctx, *reqPath, newName) } @@ -137,8 +176,33 @@ func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) er return err } +func (d 
*Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + if !d.Writable { + return errs.PermissionDenied + } + srcPath, err := d.getReqPath(ctx, srcObj, false) + if errs.IsNotImplement(err) { + return errors.New("same-name files cannot be copied") + } + if err != nil { + return err + } + dstPath, err := d.getReqPath(ctx, dstDir, true) + if errs.IsNotImplement(err) { + return errors.New("same-name dirs cannot be copied to") + } + if err != nil { + return err + } + _, err = fs.Copy(ctx, *srcPath, *dstPath) + return err +} + func (d *Alias) Remove(ctx context.Context, obj model.Obj) error { - reqPath, err := d.getReqPath(ctx, obj) + if !d.Writable { + return errs.PermissionDenied + } + reqPath, err := d.getReqPath(ctx, obj, false) if err == nil { return fs.Remove(ctx, *reqPath) } @@ -148,4 +212,110 @@ func (d *Alias) Remove(ctx context.Context, obj model.Obj) error { return err } +func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { + if !d.Writable { + return errs.PermissionDenied + } + reqPath, err := d.getReqPath(ctx, dstDir, true) + if err == nil { + return fs.PutDirectly(ctx, *reqPath, s) + } + if errs.IsNotImplement(err) { + return errors.New("same-name dirs cannot be Put") + } + return err +} + +func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string) error { + if !d.Writable { + return errs.PermissionDenied + } + reqPath, err := d.getReqPath(ctx, dstDir, true) + if err == nil { + return fs.PutURL(ctx, *reqPath, name, url) + } + if errs.IsNotImplement(err) { + return errors.New("same-name files cannot offline download") + } + return err +} + +func (d *Alias) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + root, sub := d.getRootAndPath(obj.GetPath()) + dsts, ok := d.pathMap[root] + if !ok { + return nil, errs.ObjectNotFound + } + for _, dst := range dsts { + meta, err := d.getArchiveMeta(ctx, dst, sub, args) 
+ if err == nil { + return meta, nil + } + } + return nil, errs.NotImplement +} + +func (d *Alias) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { + root, sub := d.getRootAndPath(obj.GetPath()) + dsts, ok := d.pathMap[root] + if !ok { + return nil, errs.ObjectNotFound + } + for _, dst := range dsts { + l, err := d.listArchive(ctx, dst, sub, args) + if err == nil { + return l, nil + } + } + return nil, errs.NotImplement +} + +func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + // alias的两个驱动,一个支持驱动提取,一个不支持,如何兼容? + // 如果访问的是不支持驱动提取的驱动内的压缩文件,GetArchiveMeta就会返回errs.NotImplement,提取URL前缀就会是/ae,Extract就不会被调用 + // 如果访问的是支持驱动提取的驱动内的压缩文件,GetArchiveMeta就会返回有效值,提取URL前缀就会是/ad,Extract就会被调用 + root, sub := d.getRootAndPath(obj.GetPath()) + dsts, ok := d.pathMap[root] + if !ok { + return nil, errs.ObjectNotFound + } + for _, dst := range dsts { + link, err := d.extract(ctx, dst, sub, args) + if err == nil { + if !args.Redirect && len(link.URL) > 0 { + if d.DownloadConcurrency > 0 { + link.Concurrency = d.DownloadConcurrency + } + if d.DownloadPartSize > 0 { + link.PartSize = d.DownloadPartSize * utils.KB + } + } + return link, nil + } + } + return nil, errs.NotImplement +} + +func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error { + if !d.Writable { + return errs.PermissionDenied + } + srcPath, err := d.getReqPath(ctx, srcObj, false) + if errs.IsNotImplement(err) { + return errors.New("same-name files cannot be decompressed") + } + if err != nil { + return err + } + dstPath, err := d.getReqPath(ctx, dstDir, true) + if errs.IsNotImplement(err) { + return errors.New("same-name dirs cannot be decompressed to") + } + if err != nil { + return err + } + _, err = fs.ArchiveDecompress(ctx, *srcPath, *dstPath, args) + return err +} + var _ driver.Driver = (*Alias)(nil) diff --git a/drivers/alias/meta.go 
b/drivers/alias/meta.go index ed657a5d..70dc59f0 100644 --- a/drivers/alias/meta.go +++ b/drivers/alias/meta.go @@ -13,13 +13,14 @@ type Addition struct { ProtectSameName bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"` DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"` DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"` + Writable bool `json:"writable" type:"bool" default:"false"` } var config = driver.Config{ Name: "Alias", LocalSort: true, NoCache: true, - NoUpload: true, + NoUpload: false, DefaultRoot: "/", ProxyRangeOption: true, } diff --git a/drivers/alias/util.go b/drivers/alias/util.go index 2157a43d..ffb0b84f 100644 --- a/drivers/alias/util.go +++ b/drivers/alias/util.go @@ -3,9 +3,11 @@ package alias import ( "context" "fmt" + "net/url" stdpath "path" "strings" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/model" @@ -125,9 +127,9 @@ func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) return link, err } -func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error) { +func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) (*string, error) { root, sub := d.getRootAndPath(obj.GetPath()) - if sub == "" { + if sub == "" && !isParent { return nil, errs.NotSupport } dsts, ok := d.pathMap[root] @@ -156,3 +158,68 @@ func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error) } return reqPath, nil } + +func (d *Alias) getArchiveMeta(ctx context.Context, dst, sub string, args model.ArchiveArgs) (model.ArchiveMeta, error) { + reqPath := stdpath.Join(dst, sub) + storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath) + 
if err != nil { + return nil, err + } + if _, ok := storage.(driver.ArchiveReader); ok { + return op.GetArchiveMeta(ctx, storage, reqActualPath, model.ArchiveMetaArgs{ + ArchiveArgs: args, + Refresh: true, + }) + } + return nil, errs.NotImplement +} + +func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) ([]model.Obj, error) { + reqPath := stdpath.Join(dst, sub) + storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath) + if err != nil { + return nil, err + } + if _, ok := storage.(driver.ArchiveReader); ok { + return op.ListArchive(ctx, storage, reqActualPath, model.ArchiveListArgs{ + ArchiveInnerArgs: args, + Refresh: true, + }) + } + return nil, errs.NotImplement +} + +func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) { + reqPath := stdpath.Join(dst, sub) + storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath) + if err != nil { + return nil, err + } + if _, ok := storage.(driver.ArchiveReader); ok { + if _, ok := storage.(*Alias); !ok && !args.Redirect { + link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args) + return link, err + } + _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true}) + if err != nil { + return nil, err + } + if common.ShouldProxy(storage, stdpath.Base(sub)) { + link := &model.Link{ + URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s", + common.GetApiUrl(args.HttpReq), + utils.EncodePath(reqPath, true), + utils.EncodePath(args.InnerPath, true), + url.QueryEscape(args.Password), + sign.SignArchive(reqPath)), + } + if args.HttpReq != nil && d.ProxyRange { + link.RangeReadCloser = common.NoProxyRange + } + return link, nil + } + link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args) + return link, err + } + return nil, errs.NotImplement +} diff --git a/internal/fs/fs.go b/internal/fs/fs.go index a873f917..01818e5f 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -2,12 +2,15 @@ 
package fs import ( "context" + log "github.com/sirupsen/logrus" + "io" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/task" - log "github.com/sirupsen/logrus" - "io" + "github.com/pkg/errors" ) // the param named path of functions in this package is a mount path @@ -168,3 +171,19 @@ func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) { } return res, err } + +func PutURL(ctx context.Context, path, dstName, urlStr string) error { + storage, dstDirActualPath, err := op.GetStorageAndActualPath(path) + if err != nil { + return errors.WithMessage(err, "failed get storage") + } + if storage.Config().NoUpload { + return errors.WithStack(errs.UploadNotSupported) + } + _, ok := storage.(driver.PutURL) + _, okResult := storage.(driver.PutURLResult) + if !ok && !okResult { + return errs.NotImplement + } + return op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr) +} diff --git a/internal/offline_download/tool/add.go b/internal/offline_download/tool/add.go index 884e166b..d64e43e8 100644 --- a/internal/offline_download/tool/add.go +++ b/internal/offline_download/tool/add.go @@ -2,20 +2,20 @@ package tool import ( "context" + "net/url" + stdpath "path" + "path/filepath" + _115 "github.com/alist-org/alist/v3/drivers/115" "github.com/alist-org/alist/v3/drivers/pikpak" "github.com/alist-org/alist/v3/drivers/thunder" - "github.com/alist-org/alist/v3/internal/driver" - "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/setting" - "github.com/alist-org/alist/v3/internal/task" - "net/url" - "path" - "path/filepath" - "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" 
"github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/internal/task" "github.com/google/uuid" "github.com/pkg/errors" ) @@ -59,8 +59,11 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro } } // try putting url - if args.Tool == "SimpleHttp" && tryPutUrl(ctx, storage, dstDirActualPath, args.URL) { - return nil, nil + if args.Tool == "SimpleHttp" { + err = tryPutUrl(ctx, args.DstDirPath, args.URL) + if err == nil || !errors.Is(err, errs.NotImplement) { + return nil, err + } } // get tool @@ -118,17 +121,13 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro return t, nil } -func tryPutUrl(ctx context.Context, storage driver.Driver, dstDirActualPath, urlStr string) bool { - _, ok := storage.(driver.PutURL) - _, okResult := storage.(driver.PutURLResult) - if !ok && !okResult { - return false - } +func tryPutUrl(ctx context.Context, path, urlStr string) error { + var dstName string u, err := url.Parse(urlStr) - if err != nil { - return false + if err == nil { + dstName = stdpath.Base(u.Path) + } else { + dstName = "UnnamedURL" } - dstName := path.Base(u.Path) - err = op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr) - return err == nil + return fs.PutURL(ctx, path, dstName, urlStr) } diff --git a/internal/op/archive.go b/internal/op/archive.go index a241838c..4015e299 100644 --- a/internal/op/archive.go +++ b/internal/op/archive.go @@ -84,7 +84,7 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg meta, err := storageAr.GetArchiveMeta(ctx, obj, args.ArchiveArgs) if !errors.Is(err, errs.NotImplement) { archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true} - if meta.GetTree() != nil { + if meta != nil && meta.GetTree() != nil { archiveMetaProvider.Sort = &storage.GetStorage().Sort } if !storage.Config().NoCache { From 44cc71d354f6a21130010a9ae836510a429180fa 
Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Thu, 27 Mar 2025 23:18:15 +0800 Subject: [PATCH 145/187] fix(cloudreve): enable SetContentLength for uploading to local policy (#8228 close #8174) * fix(cloudreve): upload failure to return error msg instead of deletion success * fix(cloudreve): enable SetContentLength for uploading to local policy * refactor(cloudreve): move local policy upload logic to utils for better error handling * refactor(cloudreve): unified upload code style * refactor(cloudreve): improve user agent handling --- drivers/cloudreve/driver.go | 33 ++------------------- drivers/cloudreve/util.go | 58 ++++++++++++++++++++++++++++++------- 2 files changed, 50 insertions(+), 41 deletions(-) diff --git a/drivers/cloudreve/driver.go b/drivers/cloudreve/driver.go index 33ef7ddc..d0ab30b6 100644 --- a/drivers/cloudreve/driver.go +++ b/drivers/cloudreve/driver.go @@ -1,13 +1,10 @@ package cloudreve import ( - "bytes" "context" - "errors" "io" "net/http" "path" - "strconv" "strings" "github.com/alist-org/alist/v3/drivers/base" @@ -168,39 +165,13 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File case "remote": // 从机存储 err = d.upRemote(ctx, stream, u, up) case "local": // 本机存储 - var chunkSize = u.ChunkSize - var buf []byte - var chunk int - for { - var n int - buf = make([]byte, chunkSize) - n, err = io.ReadAtLeast(stream, buf, chunkSize) - if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) { - if err == io.EOF { - return nil - } - return err - } - if n == 0 { - break - } - buf = buf[:n] - err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) { - req.SetHeader("Content-Type", "application/octet-stream") - req.SetHeader("Content-Length", strconv.Itoa(n)) - req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf))) - }, nil) - if err != nil { - break - } - chunk++ - } + err = d.upLocal(ctx, stream, u, up) default: err = errs.NotImplement } if err != nil { 
// 删除失败的会话 - err = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil) + _ = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil) return err } return nil diff --git a/drivers/cloudreve/util.go b/drivers/cloudreve/util.go index f41b6b84..cffa7988 100644 --- a/drivers/cloudreve/util.go +++ b/drivers/cloudreve/util.go @@ -27,17 +27,20 @@ import ( const loginPath = "/user/session" +func (d *Cloudreve) getUA() string { + if d.CustomUA != "" { + return d.CustomUA + } + return base.UserAgent +} + func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error { u := d.Address + "/api/v3" + path - ua := d.CustomUA - if ua == "" { - ua = base.UserAgent - } req := base.RestyClient.R() req.SetHeaders(map[string]string{ "Cookie": "cloudreve-session=" + d.Cookie, "Accept": "application/json, text/plain, */*", - "User-Agent": ua, + "User-Agent": d.getUA(), }) var r Resp @@ -161,15 +164,11 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) { if !d.Addition.EnableThumbAndFolderSize { return model.Thumbnail{}, nil } - ua := d.CustomUA - if ua == "" { - ua = base.UserAgent - } req := base.NoRedirectClient.R() req.SetHeaders(map[string]string{ "Cookie": "cloudreve-session=" + d.Cookie, "Accept": "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8", - "User-Agent": ua, + "User-Agent": d.getUA(), }) resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id) if err != nil { @@ -180,6 +179,43 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) { }, nil } +func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error { + var finish int64 = 0 + var chunk int = 0 + DEFAULT := int64(u.ChunkSize) + for finish < stream.GetSize() { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + utils.Log.Debugf("[Cloudreve-Local] upload: %d", finish) + var byteSize = DEFAULT + left := stream.GetSize() - finish + 
if left < DEFAULT { + byteSize = left + } + byteData := make([]byte, byteSize) + n, err := io.ReadFull(stream, byteData) + utils.Log.Debug(err, n) + if err != nil { + return err + } + err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) { + req.SetHeader("Content-Type", "application/octet-stream") + req.SetContentLength(true) + req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10)) + req.SetHeader("User-Agent", d.getUA()) + req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) + }, nil) + if err != nil { + break + } + finish += byteSize + up(float64(finish) * 100 / float64(stream.GetSize())) + chunk++ + } + return nil +} + func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error { uploadUrl := u.UploadURLs[0] credential := u.Credential @@ -211,6 +247,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U req.ContentLength = byteSize // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) req.Header.Set("Authorization", fmt.Sprint(credential)) + req.Header.Set("User-Agent", d.getUA()) finish += byteSize res, err := base.HttpClient.Do(req) if err != nil { @@ -251,6 +288,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u req.ContentLength = byteSize // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())) + req.Header.Set("User-Agent", d.getUA()) finish += byteSize res, err := base.HttpClient.Do(req) if err != nil { From 704d3854df234b59b0635d32aa111aa5a811bfa1 Mon Sep 17 00:00:00 2001 From: KirCute <951206789@qq.com> Date: Thu, 27 Mar 2025 23:18:34 +0800 Subject: [PATCH 146/187] feat(alist_v3): support forward archive requests (#8230) * feat(alist_v3): support forward archive requests * fix: encode all inner path --- 
drivers/alist_v3/driver.go | 145 ++++++++++++++++++++++++++++++++++--- drivers/alist_v3/meta.go | 13 ++-- drivers/alist_v3/types.go | 87 ++++++++++++++++++++++ drivers/alist_v3/util.go | 18 +++-- 4 files changed, 239 insertions(+), 24 deletions(-) diff --git a/drivers/alist_v3/driver.go b/drivers/alist_v3/driver.go index 5a299ea0..ac7e16a1 100644 --- a/drivers/alist_v3/driver.go +++ b/drivers/alist_v3/driver.go @@ -5,12 +5,14 @@ import ( "fmt" "io" "net/http" + "net/url" "path" "strings" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" @@ -34,7 +36,7 @@ func (d *AListV3) GetAddition() driver.Additional { func (d *AListV3) Init(ctx context.Context) error { d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/") var resp common.Resp[MeResp] - _, err := d.request("/me", http.MethodGet, func(req *resty.Request) { + _, _, err := d.request("/me", http.MethodGet, func(req *resty.Request) { req.SetResult(&resp) }) if err != nil { @@ -48,15 +50,15 @@ func (d *AListV3) Init(ctx context.Context) error { } } // re-get the user info - _, err = d.request("/me", http.MethodGet, func(req *resty.Request) { + _, _, err = d.request("/me", http.MethodGet, func(req *resty.Request) { req.SetResult(&resp) }) if err != nil { return err } if resp.Data.Role == model.GUEST { - url := d.Address + "/api/public/settings" - res, err := base.RestyClient.R().Get(url) + u := d.Address + "/api/public/settings" + res, err := base.RestyClient.R().Get(u) if err != nil { return err } @@ -74,7 +76,7 @@ func (d *AListV3) Drop(ctx context.Context) error { func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { var resp common.Resp[FsListResp] - _, err := d.request("/fs/list", 
http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) { req.SetResult(&resp).SetBody(ListReq{ PageReq: model.PageReq{ Page: 1, @@ -116,7 +118,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) userAgent = base.UserAgent } } - _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) { req.SetResult(&resp).SetBody(FsGetReq{ Path: file.GetPath(), Password: d.MetaPassword, @@ -131,7 +133,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) } func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { - _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) { req.SetBody(MkdirOrLinkReq{ Path: path.Join(parentDir.GetPath(), dirName), }) @@ -140,7 +142,7 @@ func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri } func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error { - _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) { req.SetBody(MoveCopyReq{ SrcDir: path.Dir(srcObj.GetPath()), DstDir: dstDir.GetPath(), @@ -151,7 +153,7 @@ func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error { } func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) error { - _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) { req.SetBody(RenameReq{ Path: srcObj.GetPath(), Name: newName, @@ -161,7 +163,7 @@ func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) } func (d *AListV3) Copy(ctx context.Context, srcObj, 
dstDir model.Obj) error { - _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) { req.SetBody(MoveCopyReq{ SrcDir: path.Dir(srcObj.GetPath()), DstDir: dstDir.GetPath(), @@ -172,7 +174,7 @@ func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { } func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error { - _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) { req.SetBody(RemoveReq{ Dir: path.Dir(obj.GetPath()), Names: []string{obj.GetName()}, @@ -232,6 +234,127 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame return nil } +func (d *AListV3) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + if !d.ForwardArchiveReq { + return nil, errs.NotImplement + } + var resp common.Resp[ArchiveMetaResp] + _, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) { + req.SetResult(&resp).SetBody(ArchiveMetaReq{ + ArchivePass: args.Password, + Password: d.MetaPassword, + Path: obj.GetPath(), + Refresh: false, + }) + }) + if code == 202 { + return nil, errs.WrongArchivePassword + } + if err != nil { + return nil, err + } + var tree []model.ObjTree + if resp.Data.Content != nil { + tree = make([]model.ObjTree, 0, len(resp.Data.Content)) + for _, content := range resp.Data.Content { + tree = append(tree, &content) + } + } + return &model.ArchiveMetaInfo{ + Comment: resp.Data.Comment, + Encrypted: resp.Data.Encrypted, + Tree: tree, + }, nil +} + +func (d *AListV3) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { + if !d.ForwardArchiveReq { + return nil, errs.NotImplement + } + var resp common.Resp[ArchiveListResp] + _, code, err := d.request("/fs/archive/list", http.MethodPost, 
func(req *resty.Request) { + req.SetResult(&resp).SetBody(ArchiveListReq{ + ArchiveMetaReq: ArchiveMetaReq{ + ArchivePass: args.Password, + Password: d.MetaPassword, + Path: obj.GetPath(), + Refresh: false, + }, + PageReq: model.PageReq{ + Page: 1, + PerPage: 0, + }, + InnerPath: args.InnerPath, + }) + }) + if code == 202 { + return nil, errs.WrongArchivePassword + } + if err != nil { + return nil, err + } + var files []model.Obj + for _, f := range resp.Data.Content { + file := model.ObjThumb{ + Object: model.Object{ + Name: f.Name, + Modified: f.Modified, + Ctime: f.Created, + Size: f.Size, + IsFolder: f.IsDir, + HashInfo: utils.FromString(f.HashInfo), + }, + Thumbnail: model.Thumbnail{Thumbnail: f.Thumb}, + } + files = append(files, &file) + } + return files, nil +} + +func (d *AListV3) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + if !d.ForwardArchiveReq { + return nil, errs.NotSupport + } + var resp common.Resp[ArchiveMetaResp] + _, _, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) { + req.SetResult(&resp).SetBody(ArchiveMetaReq{ + ArchivePass: args.Password, + Password: d.MetaPassword, + Path: obj.GetPath(), + Refresh: false, + }) + }) + if err != nil { + return nil, err + } + return &model.Link{ + URL: fmt.Sprintf("%s?inner=%s&pass=%s&sign=%s", + resp.Data.RawURL, + utils.EncodePath(args.InnerPath, true), + url.QueryEscape(args.Password), + resp.Data.Sign), + }, nil +} + +func (d *AListV3) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error { + if !d.ForwardArchiveReq { + return errs.NotImplement + } + dir, name := path.Split(srcObj.GetPath()) + _, _, err := d.request("/fs/archive/decompress", http.MethodPost, func(req *resty.Request) { + req.SetBody(DecompressReq{ + ArchivePass: args.Password, + CacheFull: args.CacheFull, + DstDir: dstDir.GetPath(), + InnerPath: args.InnerPath, + Name: []string{name}, + PutIntoNewDir: 
args.PutIntoNewDir, + SrcDir: dir, + }) + }) + return err +} + //func (d *AList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { // return nil, errs.NotSupport //} diff --git a/drivers/alist_v3/meta.go b/drivers/alist_v3/meta.go index cc5f2189..1e8b3c53 100644 --- a/drivers/alist_v3/meta.go +++ b/drivers/alist_v3/meta.go @@ -7,12 +7,13 @@ import ( type Addition struct { driver.RootPath - Address string `json:"url" required:"true"` - MetaPassword string `json:"meta_password"` - Username string `json:"username"` - Password string `json:"password"` - Token string `json:"token"` - PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"` + Address string `json:"url" required:"true"` + MetaPassword string `json:"meta_password"` + Username string `json:"username"` + Password string `json:"password"` + Token string `json:"token"` + PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"` + ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"` } var config = driver.Config{ diff --git a/drivers/alist_v3/types.go b/drivers/alist_v3/types.go index e517307f..1ae7926e 100644 --- a/drivers/alist_v3/types.go +++ b/drivers/alist_v3/types.go @@ -4,6 +4,7 @@ import ( "time" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" ) type ListReq struct { @@ -81,3 +82,89 @@ type MeResp struct { SsoId string `json:"sso_id"` Otp bool `json:"otp"` } + +type ArchiveMetaReq struct { + ArchivePass string `json:"archive_pass"` + Password string `json:"password"` + Path string `json:"path"` + Refresh bool `json:"refresh"` +} + +type TreeResp struct { + ObjResp + Children []TreeResp `json:"children"` + hashCache *utils.HashInfo +} + +func (t *TreeResp) GetSize() int64 { + return t.Size +} + +func (t *TreeResp) GetName() string { + return t.Name +} + +func (t *TreeResp) ModTime() time.Time { + return t.Modified +} + +func (t *TreeResp) CreateTime() time.Time { + return t.Created +} + +func (t *TreeResp) 
IsDir() bool { + return t.ObjResp.IsDir +} + +func (t *TreeResp) GetHash() utils.HashInfo { + return utils.FromString(t.HashInfo) +} + +func (t *TreeResp) GetID() string { + return "" +} + +func (t *TreeResp) GetPath() string { + return "" +} + +func (t *TreeResp) GetChildren() []model.ObjTree { + ret := make([]model.ObjTree, 0, len(t.Children)) + for _, child := range t.Children { + ret = append(ret, &child) + } + return ret +} + +func (t *TreeResp) Thumb() string { + return t.ObjResp.Thumb +} + +type ArchiveMetaResp struct { + Comment string `json:"comment"` + Encrypted bool `json:"encrypted"` + Content []TreeResp `json:"content"` + RawURL string `json:"raw_url"` + Sign string `json:"sign"` +} + +type ArchiveListReq struct { + model.PageReq + ArchiveMetaReq + InnerPath string `json:"inner_path"` +} + +type ArchiveListResp struct { + Content []ObjResp `json:"content"` + Total int64 `json:"total"` +} + +type DecompressReq struct { + ArchivePass string `json:"archive_pass"` + CacheFull bool `json:"cache_full"` + DstDir string `json:"dst_dir"` + InnerPath string `json:"inner_path"` + Name []string `json:"name"` + PutIntoNewDir bool `json:"put_into_new_dir"` + SrcDir string `json:"src_dir"` +} diff --git a/drivers/alist_v3/util.go b/drivers/alist_v3/util.go index 5ede285a..50c20250 100644 --- a/drivers/alist_v3/util.go +++ b/drivers/alist_v3/util.go @@ -17,7 +17,7 @@ func (d *AListV3) login() error { return nil } var resp common.Resp[LoginResp] - _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) { req.SetResult(&resp).SetBody(base.Json{ "username": d.Username, "password": d.Password, @@ -31,7 +31,7 @@ func (d *AListV3) login() error { return nil } -func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) { +func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, int, 
error) { url := d.Address + "/api" + api req := base.RestyClient.R() req.SetHeader("Authorization", d.Token) @@ -40,22 +40,26 @@ func (d *AListV3) request(api, method string, callback base.ReqCallback, retry . } res, err := req.Execute(method, url) if err != nil { - return nil, err + code := 0 + if res != nil { + code = res.StatusCode() + } + return nil, code, err } log.Debugf("[alist_v3] response body: %s", res.String()) if res.StatusCode() >= 400 { - return nil, fmt.Errorf("request failed, status: %s", res.Status()) + return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status()) } code := utils.Json.Get(res.Body(), "code").ToInt() if code != 200 { if (code == 401 || code == 403) && !utils.IsBool(retry...) { err = d.login() if err != nil { - return nil, err + return nil, code, err } return d.request(api, method, callback, true) } - return nil, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString()) + return nil, code, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString()) } - return res.Body(), nil + return res.Body(), 200, nil } From 1335f803622308b4c3dabefdc84ab7b19fd7b4ec Mon Sep 17 00:00:00 2001 From: KirCute <951206789@qq.com> Date: Thu, 27 Mar 2025 23:20:44 +0800 Subject: [PATCH 147/187] feat(archive): support multipart archives (#8184 close #8015) * feat(archive): multipart support & sevenzip tool * feat(archive): rardecode tool * feat(archive): support decompress multi-selected * fix(archive): decompress response filter internal * feat(archive): support multipart zip * fix: more applicable AcceptedMultipartExtensions interface --- go.mod | 6 +- internal/archive/all.go | 2 + internal/archive/archives/archives.go | 26 +-- internal/archive/iso9660/iso9660.go | 22 ++- internal/archive/rardecode/rardecode.go | 140 +++++++++++++++ internal/archive/rardecode/utils.go | 225 ++++++++++++++++++++++++ internal/archive/sevenzip/sevenzip.go | 72 
++++++++ internal/archive/sevenzip/utils.go | 61 +++++++ internal/archive/tool/base.go | 14 +- internal/archive/tool/helper.go | 201 +++++++++++++++++++++ internal/archive/tool/utils.go | 17 +- internal/archive/zip/utils.go | 102 +++++------ internal/archive/zip/zip.go | 164 +++-------------- internal/driver/driver.go | 4 +- internal/fs/archive.go | 69 ++++---- internal/op/archive.go | 113 +++++++++--- internal/stream/limit.go | 2 +- internal/stream/stream.go | 29 ++- server/handles/archive.go | 93 ++++++---- 19 files changed, 1042 insertions(+), 320 deletions(-) create mode 100644 internal/archive/rardecode/rardecode.go create mode 100644 internal/archive/rardecode/utils.go create mode 100644 internal/archive/sevenzip/sevenzip.go create mode 100644 internal/archive/sevenzip/utils.go create mode 100644 internal/archive/tool/helper.go diff --git a/go.mod b/go.mod index a06c62ba..5ed8a27b 100644 --- a/go.mod +++ b/go.mod @@ -85,7 +85,7 @@ require ( github.com/blevesearch/go-faiss v1.0.20 // indirect github.com/blevesearch/zapx/v16 v16.1.5 // indirect github.com/bodgit/plumbing v1.3.0 // indirect - github.com/bodgit/sevenzip v1.6.0 // indirect + github.com/bodgit/sevenzip v1.6.0 github.com/bodgit/windows v1.0.1 // indirect github.com/bytedance/sonic/loader v0.1.1 // indirect github.com/charmbracelet/x/ansi v0.2.3 // indirect @@ -106,14 +106,14 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/matoous/go-nanoid/v2 v2.1.0 // indirect github.com/microcosm-cc/bluemonday v1.0.27 - github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect + github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 github.com/sorairolake/lzip-go v0.3.5 // indirect github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/ulikunitz/xz v0.5.12 // indirect github.com/xhofe/115-sdk-go v0.1.4 github.com/yuin/goldmark v1.7.8 - go4.org 
v0.0.0-20230225012048-214862532bf5 // indirect + go4.org v0.0.0-20230225012048-214862532bf5 resty.dev/v3 v3.0.0-beta.2 // indirect ) diff --git a/internal/archive/all.go b/internal/archive/all.go index 18167933..63206cb8 100644 --- a/internal/archive/all.go +++ b/internal/archive/all.go @@ -3,5 +3,7 @@ package archive import ( _ "github.com/alist-org/alist/v3/internal/archive/archives" _ "github.com/alist-org/alist/v3/internal/archive/iso9660" + _ "github.com/alist-org/alist/v3/internal/archive/rardecode" + _ "github.com/alist-org/alist/v3/internal/archive/sevenzip" _ "github.com/alist-org/alist/v3/internal/archive/zip" ) diff --git a/internal/archive/archives/archives.go b/internal/archive/archives/archives.go index 6d48624f..0a42cd0c 100644 --- a/internal/archive/archives/archives.go +++ b/internal/archive/archives/archives.go @@ -16,14 +16,18 @@ import ( type Archives struct { } -func (*Archives) AcceptedExtensions() []string { +func (Archives) AcceptedExtensions() []string { return []string{ - ".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar", ".rar", ".7z", + ".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar", } } -func (*Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { - fsys, err := getFs(ss, args) +func (Archives) AcceptedMultipartExtensions() map[string]tool.MultipartExtension { + return map[string]tool.MultipartExtension{} +} + +func (Archives) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + fsys, err := getFs(ss[0], args) if err != nil { return nil, err } @@ -47,8 +51,8 @@ func (*Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (mod }, nil } -func (*Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { - fsys, err := getFs(ss, args.ArchiveArgs) +func (Archives) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { 
+ fsys, err := getFs(ss[0], args.ArchiveArgs) if err != nil { return nil, err } @@ -69,8 +73,8 @@ func (*Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([ }) } -func (*Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { - fsys, err := getFs(ss, args.ArchiveArgs) +func (Archives) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + fsys, err := getFs(ss[0], args.ArchiveArgs) if err != nil { return nil, 0, err } @@ -85,8 +89,8 @@ func (*Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) return file, stat.Size(), nil } -func (*Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { - fsys, err := getFs(ss, args.ArchiveArgs) +func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + fsys, err := getFs(ss[0], args.ArchiveArgs) if err != nil { return err } @@ -133,5 +137,5 @@ func (*Archives) Decompress(ss *stream.SeekableStream, outputPath string, args m var _ tool.Tool = (*Archives)(nil) func init() { - tool.RegisterTool(&Archives{}) + tool.RegisterTool(Archives{}) } diff --git a/internal/archive/iso9660/iso9660.go b/internal/archive/iso9660/iso9660.go index e9cb3f53..be107d7b 100644 --- a/internal/archive/iso9660/iso9660.go +++ b/internal/archive/iso9660/iso9660.go @@ -14,19 +14,23 @@ import ( type ISO9660 struct { } -func (t *ISO9660) AcceptedExtensions() []string { +func (ISO9660) AcceptedExtensions() []string { return []string{".iso"} } -func (t *ISO9660) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { +func (ISO9660) AcceptedMultipartExtensions() map[string]tool.MultipartExtension { + return map[string]tool.MultipartExtension{} +} + +func (ISO9660) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, 
error) { return &model.ArchiveMetaInfo{ Comment: "", Encrypted: false, }, nil } -func (t *ISO9660) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { - img, err := getImage(ss) +func (ISO9660) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + img, err := getImage(ss[0]) if err != nil { return nil, err } @@ -48,8 +52,8 @@ func (t *ISO9660) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ( return ret, nil } -func (t *ISO9660) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { - img, err := getImage(ss) +func (ISO9660) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + img, err := getImage(ss[0]) if err != nil { return nil, 0, err } @@ -63,8 +67,8 @@ func (t *ISO9660) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs return io.NopCloser(obj.Reader()), obj.Size(), nil } -func (t *ISO9660) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { - img, err := getImage(ss) +func (ISO9660) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + img, err := getImage(ss[0]) if err != nil { return err } @@ -92,5 +96,5 @@ func (t *ISO9660) Decompress(ss *stream.SeekableStream, outputPath string, args var _ tool.Tool = (*ISO9660)(nil) func init() { - tool.RegisterTool(&ISO9660{}) + tool.RegisterTool(ISO9660{}) } diff --git a/internal/archive/rardecode/rardecode.go b/internal/archive/rardecode/rardecode.go new file mode 100644 index 00000000..cd31d1a4 --- /dev/null +++ b/internal/archive/rardecode/rardecode.go @@ -0,0 +1,140 @@ +package rardecode + +import ( + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + 
"github.com/nwaples/rardecode/v2" + "io" + "os" + stdpath "path" + "strings" +) + +type RarDecoder struct{} + +func (RarDecoder) AcceptedExtensions() []string { + return []string{".rar"} +} + +func (RarDecoder) AcceptedMultipartExtensions() map[string]tool.MultipartExtension { + return map[string]tool.MultipartExtension{ + ".part1.rar": {".part%d.rar", 2}, + } +} + +func (RarDecoder) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + l, err := list(ss, args.Password) + if err != nil { + return nil, err + } + _, tree := tool.GenerateMetaTreeFromFolderTraversal(l) + return &model.ArchiveMetaInfo{ + Comment: "", + Encrypted: false, + Tree: tree, + }, nil +} + +func (RarDecoder) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + return nil, errs.NotSupport +} + +func (RarDecoder) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + reader, err := getReader(ss, args.Password) + if err != nil { + return nil, 0, err + } + innerPath := strings.TrimPrefix(args.InnerPath, "/") + for { + var header *rardecode.FileHeader + header, err = reader.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, 0, err + } + if header.Name == innerPath { + if header.IsDir { + break + } + return io.NopCloser(reader), header.UnPackedSize, nil + } + } + return nil, 0, errs.ObjectNotFound +} + +func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + reader, err := getReader(ss, args.Password) + if err != nil { + return err + } + if args.InnerPath == "/" { + for { + var header *rardecode.FileHeader + header, err = reader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + name := header.Name + if header.IsDir { + name = name + "/" + } + err = decompress(reader, header, name, outputPath) + if err != nil { + return err + } + } + } else { + innerPath 
:= strings.TrimPrefix(args.InnerPath, "/") + innerBase := stdpath.Base(innerPath) + createdBaseDir := false + for { + var header *rardecode.FileHeader + header, err = reader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + name := header.Name + if header.IsDir { + name = name + "/" + } + if name == innerPath { + err = _decompress(reader, header, outputPath, up) + if err != nil { + return err + } + break + } else if strings.HasPrefix(name, innerPath+"/") { + targetPath := stdpath.Join(outputPath, innerBase) + if !createdBaseDir { + err = os.Mkdir(targetPath, 0700) + if err != nil { + return err + } + createdBaseDir = true + } + restPath := strings.TrimPrefix(name, innerPath+"/") + err = decompress(reader, header, restPath, targetPath) + if err != nil { + return err + } + } + } + } + return nil +} + +var _ tool.Tool = (*RarDecoder)(nil) + +func init() { + tool.RegisterTool(RarDecoder{}) +} diff --git a/internal/archive/rardecode/utils.go b/internal/archive/rardecode/utils.go new file mode 100644 index 00000000..5790ec58 --- /dev/null +++ b/internal/archive/rardecode/utils.go @@ -0,0 +1,225 @@ +package rardecode + +import ( + "fmt" + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/nwaples/rardecode/v2" + "io" + "io/fs" + "os" + stdpath "path" + "sort" + "strings" + "time" +) + +type VolumeFile struct { + stream.SStreamReadAtSeeker + name string +} + +func (v *VolumeFile) Name() string { + return v.name +} + +func (v *VolumeFile) Size() int64 { + return v.SStreamReadAtSeeker.GetRawStream().GetSize() +} + +func (v *VolumeFile) Mode() fs.FileMode { + return 0644 +} + +func (v *VolumeFile) ModTime() time.Time { + return v.SStreamReadAtSeeker.GetRawStream().ModTime() +} + +func (v *VolumeFile) IsDir() bool { + return false +} + +func (v *VolumeFile) Sys() any { + return nil +} + 
+func (v *VolumeFile) Stat() (fs.FileInfo, error) { + return v, nil +} + +func (v *VolumeFile) Close() error { + return nil +} + +type VolumeFs struct { + parts map[string]*VolumeFile +} + +func (v *VolumeFs) Open(name string) (fs.File, error) { + file, ok := v.parts[name] + if !ok { + return nil, fs.ErrNotExist + } + return file, nil +} + +func makeOpts(ss []*stream.SeekableStream) (string, rardecode.Option, error) { + if len(ss) == 1 { + reader, err := stream.NewReadAtSeeker(ss[0], 0) + if err != nil { + return "", nil, err + } + fileName := "file.rar" + fsys := &VolumeFs{parts: map[string]*VolumeFile{ + fileName: {SStreamReadAtSeeker: reader, name: fileName}, + }} + return fileName, rardecode.FileSystem(fsys), nil + } else { + parts := make(map[string]*VolumeFile, len(ss)) + for i, s := range ss { + reader, err := stream.NewReadAtSeeker(s, 0) + if err != nil { + return "", nil, err + } + fileName := fmt.Sprintf("file.part%d.rar", i+1) + parts[fileName] = &VolumeFile{SStreamReadAtSeeker: reader, name: fileName} + } + return "file.part1.rar", rardecode.FileSystem(&VolumeFs{parts: parts}), nil + } +} + +type WrapReader struct { + files []*rardecode.File +} + +func (r *WrapReader) Files() []tool.SubFile { + ret := make([]tool.SubFile, 0, len(r.files)) + for _, f := range r.files { + ret = append(ret, &WrapFile{File: f}) + } + return ret +} + +type WrapFile struct { + *rardecode.File +} + +func (f *WrapFile) Name() string { + if f.File.IsDir { + return f.File.Name + "/" + } + return f.File.Name +} + +func (f *WrapFile) FileInfo() fs.FileInfo { + return &WrapFileInfo{File: f.File} +} + +type WrapFileInfo struct { + *rardecode.File +} + +func (f *WrapFileInfo) Name() string { + return stdpath.Base(f.File.Name) +} + +func (f *WrapFileInfo) Size() int64 { + return f.File.UnPackedSize +} + +func (f *WrapFileInfo) ModTime() time.Time { + return f.File.ModificationTime +} + +func (f *WrapFileInfo) IsDir() bool { + return f.File.IsDir +} + +func (f *WrapFileInfo) Sys() any { 
+ return nil +} + +func list(ss []*stream.SeekableStream, password string) (*WrapReader, error) { + fileName, fsOpt, err := makeOpts(ss) + if err != nil { + return nil, err + } + opts := []rardecode.Option{fsOpt} + if password != "" { + opts = append(opts, rardecode.Password(password)) + } + files, err := rardecode.List(fileName, opts...) + // rardecode输出文件列表的顺序不一定是父目录在前,子目录在后 + // 父路径的长度一定比子路径短,排序后的files可保证父路径在前 + sort.Slice(files, func(i, j int) bool { + return len(files[i].Name) < len(files[j].Name) + }) + if err != nil { + return nil, filterPassword(err) + } + return &WrapReader{files: files}, nil +} + +func getReader(ss []*stream.SeekableStream, password string) (*rardecode.Reader, error) { + fileName, fsOpt, err := makeOpts(ss) + if err != nil { + return nil, err + } + opts := []rardecode.Option{fsOpt} + if password != "" { + opts = append(opts, rardecode.Password(password)) + } + rc, err := rardecode.OpenReader(fileName, opts...) + if err != nil { + return nil, filterPassword(err) + } + ss[0].Closers.Add(rc) + return &rc.Reader, nil +} + +func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath, outputPath string) error { + targetPath := outputPath + dir, base := stdpath.Split(filePath) + if dir != "" { + targetPath = stdpath.Join(targetPath, dir) + err := os.MkdirAll(targetPath, 0700) + if err != nil { + return err + } + } + if base != "" { + err := _decompress(reader, header, targetPath, func(_ float64) {}) + if err != nil { + return err + } + } + return nil +} + +func _decompress(reader *rardecode.Reader, header *rardecode.FileHeader, targetPath string, up model.UpdateProgress) error { + f, err := os.OpenFile(stdpath.Join(targetPath, stdpath.Base(header.Name)), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + _, err = io.Copy(f, &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: reader, + Size: header.UnPackedSize, + }, + UpdateProgress: 
up, + }) + if err != nil { + return err + } + return nil +} + +func filterPassword(err error) error { + if err != nil && strings.Contains(err.Error(), "password") { + return errs.WrongArchivePassword + } + return err +} diff --git a/internal/archive/sevenzip/sevenzip.go b/internal/archive/sevenzip/sevenzip.go new file mode 100644 index 00000000..28169966 --- /dev/null +++ b/internal/archive/sevenzip/sevenzip.go @@ -0,0 +1,72 @@ +package sevenzip + +import ( + "io" + "strings" + + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" +) + +type SevenZip struct{} + +func (SevenZip) AcceptedExtensions() []string { + return []string{".7z"} +} + +func (SevenZip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension { + return map[string]tool.MultipartExtension{ + ".7z.001": {".7z.%.3d", 2}, + } +} + +func (SevenZip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + reader, err := getReader(ss, args.Password) + if err != nil { + return nil, err + } + _, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: reader}) + return &model.ArchiveMetaInfo{ + Comment: "", + Encrypted: args.Password != "", + Tree: tree, + }, nil +} + +func (SevenZip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + return nil, errs.NotSupport +} + +func (SevenZip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + reader, err := getReader(ss, args.Password) + if err != nil { + return nil, 0, err + } + innerPath := strings.TrimPrefix(args.InnerPath, "/") + for _, file := range reader.File { + if file.Name == innerPath { + r, e := file.Open() + if e != nil { + return nil, 0, e + } + return r, file.FileInfo().Size(), nil + } + } + return nil, 0, errs.ObjectNotFound +} + +func (SevenZip) Decompress(ss 
[]*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + reader, err := getReader(ss, args.Password) + if err != nil { + return err + } + return tool.DecompressFromFolderTraversal(&WrapReader{Reader: reader}, outputPath, args, up) +} + +var _ tool.Tool = (*SevenZip)(nil) + +func init() { + tool.RegisterTool(SevenZip{}) +} diff --git a/internal/archive/sevenzip/utils.go b/internal/archive/sevenzip/utils.go new file mode 100644 index 00000000..624ba187 --- /dev/null +++ b/internal/archive/sevenzip/utils.go @@ -0,0 +1,61 @@ +package sevenzip + +import ( + "errors" + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/bodgit/sevenzip" + "io" + "io/fs" +) + +type WrapReader struct { + Reader *sevenzip.Reader +} + +func (r *WrapReader) Files() []tool.SubFile { + ret := make([]tool.SubFile, 0, len(r.Reader.File)) + for _, f := range r.Reader.File { + ret = append(ret, &WrapFile{f: f}) + } + return ret +} + +type WrapFile struct { + f *sevenzip.File +} + +func (f *WrapFile) Name() string { + return f.f.Name +} + +func (f *WrapFile) FileInfo() fs.FileInfo { + return f.f.FileInfo() +} + +func (f *WrapFile) Open() (io.ReadCloser, error) { + return f.f.Open() +} + +func getReader(ss []*stream.SeekableStream, password string) (*sevenzip.Reader, error) { + readerAt, err := stream.NewMultiReaderAt(ss) + if err != nil { + return nil, err + } + sr, err := sevenzip.NewReaderWithPassword(readerAt, readerAt.Size(), password) + if err != nil { + return nil, filterPassword(err) + } + return sr, nil +} + +func filterPassword(err error) error { + if err != nil { + var e *sevenzip.ReadError + if errors.As(err, &e) && e.Encrypted { + return errs.WrongArchivePassword + } + } + return err +} diff --git a/internal/archive/tool/base.go b/internal/archive/tool/base.go index 08e96614..8f5b10d9 100644 --- 
a/internal/archive/tool/base.go +++ b/internal/archive/tool/base.go @@ -6,10 +6,16 @@ import ( "io" ) +type MultipartExtension struct { + PartFileFormat string + SecondPartIndex int +} + type Tool interface { AcceptedExtensions() []string - GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) - List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) - Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) - Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error + AcceptedMultipartExtensions() map[string]MultipartExtension + GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) + List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) + Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) + Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error } diff --git a/internal/archive/tool/helper.go b/internal/archive/tool/helper.go new file mode 100644 index 00000000..8f71900a --- /dev/null +++ b/internal/archive/tool/helper.go @@ -0,0 +1,201 @@ +package tool + +import ( + "io" + "io/fs" + "os" + stdpath "path" + "strings" + + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" +) + +type SubFile interface { + Name() string + FileInfo() fs.FileInfo + Open() (io.ReadCloser, error) +} + +type CanEncryptSubFile interface { + IsEncrypted() bool + SetPassword(password string) +} + +type ArchiveReader interface { + Files() []SubFile +} + +func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree) { + encrypted := false + dirMap := make(map[string]*model.ObjectTree) + dirMap["."] = &model.ObjectTree{} + for _, file := range r.Files() { + if encrypt, ok := file.(CanEncryptSubFile); ok && 
encrypt.IsEncrypted() { + encrypted = true + } + + name := strings.TrimPrefix(file.Name(), "/") + var dir string + var dirObj *model.ObjectTree + isNewFolder := false + if !file.FileInfo().IsDir() { + // 先将 文件 添加到 所在的文件夹 + dir = stdpath.Dir(name) + dirObj = dirMap[dir] + if dirObj == nil { + isNewFolder = true + dirObj = &model.ObjectTree{} + dirObj.IsFolder = true + dirObj.Name = stdpath.Base(dir) + dirObj.Modified = file.FileInfo().ModTime() + dirMap[dir] = dirObj + } + dirObj.Children = append( + dirObj.Children, &model.ObjectTree{ + Object: *MakeModelObj(file.FileInfo()), + }, + ) + } else { + dir = strings.TrimSuffix(name, "/") + dirObj = dirMap[dir] + if dirObj == nil { + isNewFolder = true + dirObj = &model.ObjectTree{} + dirMap[dir] = dirObj + } + dirObj.IsFolder = true + dirObj.Name = stdpath.Base(dir) + dirObj.Modified = file.FileInfo().ModTime() + dirObj.Children = make([]model.ObjTree, 0) + } + if isNewFolder { + // 将 文件夹 添加到 父文件夹 + dir = stdpath.Dir(dir) + pDirObj := dirMap[dir] + if pDirObj != nil { + pDirObj.Children = append(pDirObj.Children, dirObj) + continue + } + + for { + // 考虑压缩包仅记录文件的路径,不记录文件夹 + pDirObj = &model.ObjectTree{} + pDirObj.IsFolder = true + pDirObj.Name = stdpath.Base(dir) + pDirObj.Modified = file.FileInfo().ModTime() + dirMap[dir] = pDirObj + pDirObj.Children = append(pDirObj.Children, dirObj) + dir = stdpath.Dir(dir) + if dirMap[dir] != nil { + break + } + dirObj = pDirObj + } + } + } + return encrypted, dirMap["."].GetChildren() +} + +func MakeModelObj(file os.FileInfo) *model.Object { + return &model.Object{ + Name: file.Name(), + Size: file.Size(), + Modified: file.ModTime(), + IsFolder: file.IsDir(), + } +} + +type WrapFileInfo struct { + model.Obj +} + +func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + var err error + files := r.Files() + if args.InnerPath == "/" { + for i, file := range files { + name := file.Name() + err = 
decompress(file, name, outputPath, args.Password) + if err != nil { + return err + } + up(float64(i+1) * 100.0 / float64(len(files))) + } + } else { + innerPath := strings.TrimPrefix(args.InnerPath, "/") + innerBase := stdpath.Base(innerPath) + createdBaseDir := false + for _, file := range files { + name := file.Name() + if name == innerPath { + err = _decompress(file, outputPath, args.Password, up) + if err != nil { + return err + } + break + } else if strings.HasPrefix(name, innerPath+"/") { + targetPath := stdpath.Join(outputPath, innerBase) + if !createdBaseDir { + err = os.Mkdir(targetPath, 0700) + if err != nil { + return err + } + createdBaseDir = true + } + restPath := strings.TrimPrefix(name, innerPath+"/") + err = decompress(file, restPath, targetPath, args.Password) + if err != nil { + return err + } + } + } + } + return nil +} + +func decompress(file SubFile, filePath, outputPath, password string) error { + targetPath := outputPath + dir, base := stdpath.Split(filePath) + if dir != "" { + targetPath = stdpath.Join(targetPath, dir) + err := os.MkdirAll(targetPath, 0700) + if err != nil { + return err + } + } + if base != "" { + err := _decompress(file, targetPath, password, func(_ float64) {}) + if err != nil { + return err + } + } + return nil +} + +func _decompress(file SubFile, targetPath, password string, up model.UpdateProgress) error { + if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() { + encrypt.SetPassword(password) + } + rc, err := file.Open() + if err != nil { + return err + } + defer func() { _ = rc.Close() }() + f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + _, err = io.Copy(f, &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: rc, + Size: file.FileInfo().Size(), + }, + UpdateProgress: up, + }) + if err != nil { + return err + } + return nil +} diff 
--git a/internal/archive/tool/utils.go b/internal/archive/tool/utils.go index 822ee894..aa92cb1d 100644 --- a/internal/archive/tool/utils.go +++ b/internal/archive/tool/utils.go @@ -5,19 +5,28 @@ import ( ) var ( - Tools = make(map[string]Tool) + Tools = make(map[string]Tool) + MultipartExtensions = make(map[string]MultipartExtension) ) func RegisterTool(tool Tool) { for _, ext := range tool.AcceptedExtensions() { Tools[ext] = tool } + for mainFile, ext := range tool.AcceptedMultipartExtensions() { + MultipartExtensions[mainFile] = ext + Tools[mainFile] = tool + } } -func GetArchiveTool(ext string) (Tool, error) { +func GetArchiveTool(ext string) (*MultipartExtension, Tool, error) { t, ok := Tools[ext] if !ok { - return nil, errs.UnknownArchiveFormat + return nil, nil, errs.UnknownArchiveFormat } - return t, nil + partExt, ok := MultipartExtensions[ext] + if !ok { + return nil, t, nil + } + return &partExt, t, nil } diff --git a/internal/archive/zip/utils.go b/internal/archive/zip/utils.go index aa51b88e..59f4ed51 100644 --- a/internal/archive/zip/utils.go +++ b/internal/archive/zip/utils.go @@ -2,8 +2,13 @@ package zip import ( "bytes" + "io" + "io/fs" + stdpath "path" + "strings" + + "github.com/alist-org/alist/v3/internal/archive/tool" "github.com/alist-org/alist/v3/internal/errs" - "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/stream" "github.com/saintfish/chardet" "github.com/yeka/zip" @@ -16,65 +21,62 @@ import ( "golang.org/x/text/encoding/unicode" "golang.org/x/text/encoding/unicode/utf32" "golang.org/x/text/transform" - "io" - "os" - stdpath "path" - "strings" ) -func toModelObj(file os.FileInfo) *model.Object { - return &model.Object{ - Name: decodeName(file.Name()), - Size: file.Size(), - Modified: file.ModTime(), - IsFolder: file.IsDir(), - } +type WrapReader struct { + Reader *zip.Reader } -func decompress(file *zip.File, filePath, outputPath, password string) error { - targetPath := outputPath - dir, base := 
stdpath.Split(filePath) - if dir != "" { - targetPath = stdpath.Join(targetPath, dir) - err := os.MkdirAll(targetPath, 0700) - if err != nil { - return err - } +func (r *WrapReader) Files() []tool.SubFile { + ret := make([]tool.SubFile, 0, len(r.Reader.File)) + for _, f := range r.Reader.File { + ret = append(ret, &WrapFile{f: f}) } - if base != "" { - err := _decompress(file, targetPath, password, func(_ float64) {}) - if err != nil { - return err - } - } - return nil + return ret } -func _decompress(file *zip.File, targetPath, password string, up model.UpdateProgress) error { - if file.IsEncrypted() { - file.SetPassword(password) +type WrapFileInfo struct { + fs.FileInfo +} + +func (f *WrapFileInfo) Name() string { + return decodeName(f.FileInfo.Name()) +} + +type WrapFile struct { + f *zip.File +} + +func (f *WrapFile) Name() string { + return decodeName(f.f.Name) +} + +func (f *WrapFile) FileInfo() fs.FileInfo { + return &WrapFileInfo{FileInfo: f.f.FileInfo()} +} + +func (f *WrapFile) Open() (io.ReadCloser, error) { + return f.f.Open() +} + +func (f *WrapFile) IsEncrypted() bool { + return f.f.IsEncrypted() +} + +func (f *WrapFile) SetPassword(password string) { + f.f.SetPassword(password) +} + +func getReader(ss []*stream.SeekableStream) (*zip.Reader, error) { + if len(ss) > 1 && stdpath.Ext(ss[1].GetName()) == ".z01" { + // FIXME: Incorrect parsing method for standard multipart zip format + ss = append(ss[1:], ss[0]) } - rc, err := file.Open() + reader, err := stream.NewMultiReaderAt(ss) if err != nil { - return err + return nil, err } - defer rc.Close() - f, err := os.OpenFile(stdpath.Join(targetPath, decodeName(file.FileInfo().Name())), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) - if err != nil { - return err - } - defer f.Close() - _, err = io.Copy(f, &stream.ReaderUpdatingProgress{ - Reader: &stream.SimpleReaderWithSize{ - Reader: rc, - Size: file.FileInfo().Size(), - }, - UpdateProgress: up, - }) - if err != nil { - return err - } - return nil + return 
zip.NewReader(reader, reader.Size()) } func filterPassword(err error) error { diff --git a/internal/archive/zip/zip.go b/internal/archive/zip/zip.go index 9dc8cc76..6e23570c 100644 --- a/internal/archive/zip/zip.go +++ b/internal/archive/zip/zip.go @@ -2,7 +2,6 @@ package zip import ( "io" - "os" stdpath "path" "strings" @@ -10,106 +9,37 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/stream" - "github.com/yeka/zip" ) type Zip struct { } -func (*Zip) AcceptedExtensions() []string { - return []string{".zip"} +func (Zip) AcceptedExtensions() []string { + return []string{} } -func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { - reader, err := stream.NewReadAtSeeker(ss, 0) +func (Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension { + return map[string]tool.MultipartExtension{ + ".zip": {".z%.2d", 1}, + ".zip.001": {".zip.%.3d", 2}, + } +} + +func (Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + zipReader, err := getReader(ss) if err != nil { return nil, err } - zipReader, err := zip.NewReader(reader, ss.GetSize()) - if err != nil { - return nil, err - } - encrypted := false - dirMap := make(map[string]*model.ObjectTree) - dirMap["."] = &model.ObjectTree{} - for _, file := range zipReader.File { - if file.IsEncrypted() { - encrypted = true - } - - name := strings.TrimPrefix(decodeName(file.Name), "/") - var dir string - var dirObj *model.ObjectTree - isNewFolder := false - if !file.FileInfo().IsDir() { - // 先将 文件 添加到 所在的文件夹 - dir = stdpath.Dir(name) - dirObj = dirMap[dir] - if dirObj == nil { - isNewFolder = true - dirObj = &model.ObjectTree{} - dirObj.IsFolder = true - dirObj.Name = stdpath.Base(dir) - dirObj.Modified = file.ModTime() - dirMap[dir] = dirObj - } - dirObj.Children = append( - dirObj.Children, &model.ObjectTree{ - Object: 
*toModelObj(file.FileInfo()), - }, - ) - } else { - dir = strings.TrimSuffix(name, "/") - dirObj = dirMap[dir] - if dirObj == nil { - isNewFolder = true - dirObj = &model.ObjectTree{} - dirMap[dir] = dirObj - } - dirObj.IsFolder = true - dirObj.Name = stdpath.Base(dir) - dirObj.Modified = file.ModTime() - dirObj.Children = make([]model.ObjTree, 0) - } - if isNewFolder { - // 将 文件夹 添加到 父文件夹 - dir = stdpath.Dir(dir) - pDirObj := dirMap[dir] - if pDirObj != nil { - pDirObj.Children = append(pDirObj.Children, dirObj) - continue - } - - for { - // 考虑压缩包仅记录文件的路径,不记录文件夹 - pDirObj = &model.ObjectTree{} - pDirObj.IsFolder = true - pDirObj.Name = stdpath.Base(dir) - pDirObj.Modified = file.ModTime() - dirMap[dir] = pDirObj - pDirObj.Children = append(pDirObj.Children, dirObj) - dir = stdpath.Dir(dir) - if dirMap[dir] != nil { - break - } - dirObj = pDirObj - } - } - } - + encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: zipReader}) return &model.ArchiveMetaInfo{ Comment: zipReader.Comment, Encrypted: encrypted, - Tree: dirMap["."].GetChildren(), + Tree: tree, }, nil } -func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { - reader, err := stream.NewReadAtSeeker(ss, 0) - if err != nil { - return nil, err - } - zipReader, err := zip.NewReader(reader, ss.GetSize()) +func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + zipReader, err := getReader(ss) if err != nil { return nil, err } @@ -134,13 +64,13 @@ func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode if dir == nil && len(strs) == 2 { dir = &model.Object{ Name: strs[0], - Modified: ss.ModTime(), + Modified: ss[0].ModTime(), IsFolder: true, } } continue } - ret = append(ret, toModelObj(file.FileInfo())) + ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()})) } if len(ret) == 0 && dir != nil { ret = append(ret, dir) @@ -157,7 +87,7 @@ func (*Zip) List(ss 
*stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode continue } exist = true - ret = append(ret, toModelObj(file.FileInfo())) + ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo()})) } if !exist { return nil, errs.ObjectNotFound @@ -166,12 +96,8 @@ func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode } } -func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { - reader, err := stream.NewReadAtSeeker(ss, 0) - if err != nil { - return nil, 0, err - } - zipReader, err := zip.NewReader(reader, ss.GetSize()) +func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + zipReader, err := getReader(ss) if err != nil { return nil, 0, err } @@ -191,58 +117,16 @@ func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io. return nil, 0, errs.ObjectNotFound } -func (*Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { - reader, err := stream.NewReadAtSeeker(ss, 0) +func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + zipReader, err := getReader(ss) if err != nil { return err } - zipReader, err := zip.NewReader(reader, ss.GetSize()) - if err != nil { - return err - } - if args.InnerPath == "/" { - for i, file := range zipReader.File { - name := decodeName(file.Name) - err = decompress(file, name, outputPath, args.Password) - if err != nil { - return err - } - up(float64(i+1) * 100.0 / float64(len(zipReader.File))) - } - } else { - innerPath := strings.TrimPrefix(args.InnerPath, "/") - innerBase := stdpath.Base(innerPath) - createdBaseDir := false - for _, file := range zipReader.File { - name := decodeName(file.Name) - if name == innerPath { - err = _decompress(file, outputPath, args.Password, up) - if err != nil { - return err - } - break - } else if 
strings.HasPrefix(name, innerPath+"/") { - targetPath := stdpath.Join(outputPath, innerBase) - if !createdBaseDir { - err = os.Mkdir(targetPath, 0700) - if err != nil { - return err - } - createdBaseDir = true - } - restPath := strings.TrimPrefix(name, innerPath+"/") - err = decompress(file, restPath, targetPath, args.Password) - if err != nil { - return err - } - } - } - } - return nil + return tool.DecompressFromFolderTraversal(&WrapReader{Reader: zipReader}, outputPath, args, up) } var _ tool.Tool = (*Zip)(nil) func init() { - tool.RegisterTool(&Zip{}) + tool.RegisterTool(Zip{}) } diff --git a/internal/driver/driver.go b/internal/driver/driver.go index 05f0fe24..9e9440b6 100644 --- a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -79,13 +79,13 @@ type Remove interface { type Put interface { // Put a file (provided as a FileStreamer) into the driver // Besides the most basic upload functionality, the following features also need to be implemented: - // 1. Canceling (when `<-ctx.Done()` returns), by the following methods: + // 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods: // (1) Use request methods that carry context, such as the following: // a. http.NewRequestWithContext // b. resty.Request.SetContext // c. s3manager.Uploader.UploadWithContext // d. utils.CopyWithCtx - // (2) Use a `driver.ReaderWithCtx` or a `driver.NewLimitedUploadStream` + // (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream` // (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process, // this is typically applicable to chunked uploads. // 2. Submit upload progress (via `up`) in real-time. 
There are three recommended ways as follows: diff --git a/internal/fs/archive.go b/internal/fs/archive.go index 39131827..b056decf 100644 --- a/internal/fs/archive.go +++ b/internal/fs/archive.go @@ -4,17 +4,6 @@ import ( "context" stderrors "errors" "fmt" - "github.com/alist-org/alist/v3/internal/archive/tool" - "github.com/alist-org/alist/v3/internal/conf" - "github.com/alist-org/alist/v3/internal/driver" - "github.com/alist-org/alist/v3/internal/errs" - "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/op" - "github.com/alist-org/alist/v3/internal/stream" - "github.com/alist-org/alist/v3/internal/task" - "github.com/pkg/errors" - log "github.com/sirupsen/logrus" - "github.com/xhofe/tache" "io" "math/rand" "mime" @@ -25,6 +14,17 @@ import ( "strconv" "strings" "time" + + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/internal/task" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/xhofe/tache" ) type ArchiveDownloadTask struct { @@ -37,7 +37,6 @@ type ArchiveDownloadTask struct { dstStorage driver.Driver SrcStorageMp string DstStorageMp string - Tool tool.Tool } func (t *ArchiveDownloadTask) GetName() string { @@ -67,33 +66,39 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT if t.srcStorage == nil { t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp) } - l, srcObj, err := op.Link(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{ + srcObj, tool, ss, err := op.GetArchiveToolAndStream(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{ Header: http.Header{}, }) if err != nil { return nil, err } - fs := stream.FileStream{ - Obj: srcObj, - Ctx: t.Ctx(), - } - ss, err := 
stream.NewSeekableStream(fs, l) - if err != nil { - return nil, err - } defer func() { - if err := ss.Close(); err != nil { - log.Errorf("failed to close file streamer, %v", err) + var e error + for _, s := range ss { + e = stderrors.Join(e, s.Close()) + } + if e != nil { + log.Errorf("failed to close file streamer, %v", e) } }() var decompressUp model.UpdateProgress if t.CacheFull { - t.SetTotalBytes(srcObj.GetSize()) - t.status = "getting src object" - _, err = ss.CacheFullInTempFileAndUpdateProgress(t.SetProgress) - if err != nil { - return nil, err + var total, cur int64 = 0, 0 + for _, s := range ss { + total += s.GetSize() } + t.SetTotalBytes(total) + t.status = "getting src object" + for _, s := range ss { + _, err = s.CacheFullInTempFileAndUpdateProgress(func(p float64) { + t.SetProgress((float64(cur) + float64(s.GetSize())*p/100.0) / float64(total)) + }) + cur += s.GetSize() + if err != nil { + return nil, err + } + } + t.SetProgress(100.0) decompressUp = func(_ float64) {} } else { decompressUp = t.SetProgress @@ -103,7 +108,7 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT if err != nil { return nil, err } - err = t.Tool.Decompress(ss, dir, t.ArchiveInnerArgs, decompressUp) + err = tool.Decompress(ss, dir, t.ArchiveInnerArgs, decompressUp) if err != nil { return nil, err } @@ -344,11 +349,6 @@ func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args return nil, err } } - ext := stdpath.Ext(srcObjActualPath) - t, err := tool.GetArchiveTool(ext) - if err != nil { - return nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext) - } taskCreator, _ := ctx.Value("user").(*model.User) tsk := &ArchiveDownloadTask{ TaskExtension: task.TaskExtension{ @@ -361,7 +361,6 @@ func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args DstDirPath: dstDirActualPath, SrcStorageMp: srcStorage.GetStorage().MountPath, DstStorageMp: dstStorage.GetStorage().MountPath, - Tool: t, } if 
ctx.Value(conf.NoTaskKey) != nil { uploadTask, err := tsk.RunWithoutPushUploadTask() diff --git a/internal/op/archive.go b/internal/op/archive.go index 4015e299..38b870c7 100644 --- a/internal/op/archive.go +++ b/internal/op/archive.go @@ -3,6 +3,7 @@ package op import ( "context" stderrors "errors" + "fmt" "io" stdpath "path" "strings" @@ -54,21 +55,76 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg return meta, err } -func getArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, *stream.SeekableStream, error) { +func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, []*stream.SeekableStream, error) { l, obj, err := Link(ctx, storage, path, args) if err != nil { return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path) } - ext := stdpath.Ext(obj.GetName()) - t, err := tool.GetArchiveTool(ext) + baseName, ext, found := strings.Cut(obj.GetName(), ".") + if !found { + if l.MFile != nil { + _ = l.MFile.Close() + } + if l.RangeReadCloser != nil { + _ = l.RangeReadCloser.Close() + } + return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.") + } + partExt, t, err := tool.GetArchiveTool("." 
+ ext) if err != nil { - return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext) + var e error + partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName())) + if e != nil { + if l.MFile != nil { + _ = l.MFile.Close() + } + if l.RangeReadCloser != nil { + _ = l.RangeReadCloser.Close() + } + return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext) + } } ss, err := stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: obj}, l) if err != nil { + if l.MFile != nil { + _ = l.MFile.Close() + } + if l.RangeReadCloser != nil { + _ = l.RangeReadCloser.Close() + } return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path) } - return obj, t, ss, nil + ret := []*stream.SeekableStream{ss} + if partExt == nil { + return obj, t, ret, nil + } else { + index := partExt.SecondPartIndex + dir := stdpath.Dir(path) + for { + p := stdpath.Join(dir, baseName+fmt.Sprintf(partExt.PartFileFormat, index)) + var o model.Obj + l, o, err = Link(ctx, storage, p, args) + if err != nil { + break + } + ss, err = stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: o}, l) + if err != nil { + if l.MFile != nil { + _ = l.MFile.Close() + } + if l.RangeReadCloser != nil { + _ = l.RangeReadCloser.Close() + } + for _, s := range ret { + _ = s.Close() + } + return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path) + } + ret = append(ret, ss) + index++ + } + return obj, t, ret, nil + } } func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) { @@ -94,13 +150,17 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg return obj, archiveMetaProvider, err } } - obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs) + obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs) if err != nil { return nil, nil, err } defer 
func() { - if err := ss.Close(); err != nil { - log.Errorf("failed to close file streamer, %v", err) + var e error + for _, s := range ss { + e = stderrors.Join(e, s.Close()) + } + if e != nil { + log.Errorf("failed to close file streamer, %v", e) } }() meta, err := t.GetMeta(ss, args.ArchiveArgs) @@ -114,9 +174,9 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg if !storage.Config().NoCache { Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration) archiveMetaProvider.Expiration = &Expiration - } else if ss.Link.MFile == nil { + } else if ss[0].Link.MFile == nil { // alias、crypt 驱动 - archiveMetaProvider.Expiration = ss.Link.Expiration + archiveMetaProvider.Expiration = ss[0].Link.Expiration } return obj, archiveMetaProvider, err } @@ -188,13 +248,17 @@ func _listArchive(ctx context.Context, storage driver.Driver, path string, args return obj, files, err } } - obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs) + obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs) if err != nil { return nil, nil, err } defer func() { - if err := ss.Close(); err != nil { - log.Errorf("failed to close file streamer, %v", err) + var e error + for _, s := range ss { + e = stderrors.Join(e, s.Close()) + } + if e != nil { + log.Errorf("failed to close file streamer, %v", e) } }() files, err := t.List(ss, args.ArchiveInnerArgs) @@ -378,8 +442,8 @@ func driverExtract(ctx context.Context, storage driver.Driver, path string, args } type streamWithParent struct { - rc io.ReadCloser - parent *stream.SeekableStream + rc io.ReadCloser + parents []*stream.SeekableStream } func (s *streamWithParent) Read(p []byte) (int, error) { @@ -387,24 +451,31 @@ func (s *streamWithParent) Read(p []byte) (int, error) { } func (s *streamWithParent) Close() error { - err1 := s.rc.Close() - err2 := s.parent.Close() - return stderrors.Join(err1, err2) + err := s.rc.Close() + for _, ss := range s.parents { + 
err = stderrors.Join(err, ss.Close()) + } + return err } func InternalExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { - _, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs) + _, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs) if err != nil { return nil, 0, err } rc, size, err := t.Extract(ss, args) if err != nil { - if e := ss.Close(); e != nil { + var e error + for _, s := range ss { + e = stderrors.Join(e, s.Close()) + } + if e != nil { log.Errorf("failed to close file streamer, %v", e) + err = stderrors.Join(err, e) } return nil, 0, err } - return &streamWithParent{rc: rc, parent: ss}, size, nil + return &streamWithParent{rc: rc, parents: ss}, size, nil } func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) error { diff --git a/internal/stream/limit.go b/internal/stream/limit.go index 3b32a55f..14d0efd0 100644 --- a/internal/stream/limit.go +++ b/internal/stream/limit.go @@ -139,7 +139,7 @@ type RateLimitRangeReadCloser struct { Limiter Limiter } -func (rrc RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { +func (rrc *RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { rc, err := rrc.RangeReadCloserIF.RangeRead(ctx, httpRange) if err != nil { return nil, err diff --git a/internal/stream/stream.go b/internal/stream/stream.go index 1c94715f..f6b045a0 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -14,6 +14,7 @@ import ( "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" "github.com/sirupsen/logrus" + "go4.org/readerutil" ) type FileStream struct { @@ -159,6 +160,10 @@ var _ model.FileStreamer = (*FileStream)(nil) //var _ seekableStream = (*FileStream)(nil) // for most 
internal stream, which is either RangeReadCloser or MFile +// Any functionality implemented based on SeekableStream should implement a Close method, +// whose only purpose is to close the SeekableStream object. If such functionality has +// additional resources that need to be closed, they should be added to the Closer property of +// the SeekableStream object and be closed together when the SeekableStream object is closed. type SeekableStream struct { FileStream Link *model.Link @@ -196,7 +201,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) return &ss, nil } if ss.Link.RangeReadCloser != nil { - ss.rangeReadCloser = RateLimitRangeReadCloser{ + ss.rangeReadCloser = &RateLimitRangeReadCloser{ RangeReadCloserIF: ss.Link.RangeReadCloser, Limiter: ServerDownloadLimit, } @@ -208,7 +213,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) if err != nil { return nil, err } - rrc = RateLimitRangeReadCloser{ + rrc = &RateLimitRangeReadCloser{ RangeReadCloserIF: rrc, Limiter: ServerDownloadLimit, } @@ -364,7 +369,7 @@ type RangeReadReadAtSeeker struct { ss *SeekableStream masterOff int64 readers []*readerCur - *headCache + headCache *headCache } type headCache struct { @@ -406,7 +411,7 @@ func (c *headCache) read(p []byte) (n int, err error) { } return } -func (r *headCache) close() error { +func (r *headCache) Close() error { for i := range r.bufs { r.bufs[i] = nil } @@ -419,6 +424,7 @@ func (r *RangeReadReadAtSeeker) InitHeadCache() { reader := r.readers[0] r.readers = r.readers[1:] r.headCache = &headCache{readerCur: reader} + r.ss.Closers.Add(r.headCache) } } @@ -449,6 +455,18 @@ func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStr return r, nil } +func NewMultiReaderAt(ss []*SeekableStream) (readerutil.SizeReaderAt, error) { + readers := make([]readerutil.SizeReaderAt, 0, len(ss)) + for _, s := range ss { + ra, err := NewReadAtSeeker(s, 0) + if err != nil { + return nil, 
err + } + readers = append(readers, io.NewSectionReader(ra, 0, s.GetSize())) + } + return readerutil.NewMultiReaderAt(readers...), nil +} + func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream { return r.ss } @@ -559,9 +577,6 @@ func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) { } func (r *RangeReadReadAtSeeker) Close() error { - if r.headCache != nil { - _ = r.headCache.close() - } return r.ss.Close() } diff --git a/server/handles/archive.go b/server/handles/archive.go index 4ec933e1..550bc3ce 100644 --- a/server/handles/archive.go +++ b/server/handles/archive.go @@ -1,10 +1,11 @@ package handles import ( + "encoding/json" "fmt" + "github.com/alist-org/alist/v3/internal/task" "net/url" stdpath "path" - "strings" "github.com/alist-org/alist/v3/internal/archive/tool" "github.com/alist-org/alist/v3/internal/conf" @@ -208,14 +209,30 @@ func FsArchiveList(c *gin.Context) { }) } +type StringOrArray []string + +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var value string + if err := json.Unmarshal(data, &value); err == nil { + *s = []string{value} + return nil + } + var sliceValue []string + if err := json.Unmarshal(data, &sliceValue); err != nil { + return err + } + *s = sliceValue + return nil +} + type ArchiveDecompressReq struct { - SrcDir string `json:"src_dir" form:"src_dir"` - DstDir string `json:"dst_dir" form:"dst_dir"` - Name string `json:"name" form:"name"` - ArchivePass string `json:"archive_pass" form:"archive_pass"` - InnerPath string `json:"inner_path" form:"inner_path"` - CacheFull bool `json:"cache_full" form:"cache_full"` - PutIntoNewDir bool `json:"put_into_new_dir" form:"put_into_new_dir"` + SrcDir string `json:"src_dir" form:"src_dir"` + DstDir string `json:"dst_dir" form:"dst_dir"` + Name StringOrArray `json:"name" form:"name"` + ArchivePass string `json:"archive_pass" form:"archive_pass"` + InnerPath string `json:"inner_path" form:"inner_path"` + CacheFull bool `json:"cache_full" form:"cache_full"` + 
PutIntoNewDir bool `json:"put_into_new_dir" form:"put_into_new_dir"` } func FsArchiveDecompress(c *gin.Context) { @@ -229,41 +246,51 @@ func FsArchiveDecompress(c *gin.Context) { common.ErrorResp(c, errs.PermissionDenied, 403) return } - srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, req.Name)) - if err != nil { - common.ErrorResp(c, err, 403) - return + srcPaths := make([]string, 0, len(req.Name)) + for _, name := range req.Name { + srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, name)) + if err != nil { + common.ErrorResp(c, err, 403) + return + } + srcPaths = append(srcPaths, srcPath) } dstDir, err := user.JoinPath(req.DstDir) if err != nil { common.ErrorResp(c, err, 403) return } - t, err := fs.ArchiveDecompress(c, srcPath, dstDir, model.ArchiveDecompressArgs{ - ArchiveInnerArgs: model.ArchiveInnerArgs{ - ArchiveArgs: model.ArchiveArgs{ - LinkArgs: model.LinkArgs{ - Header: c.Request.Header, - Type: c.Query("type"), - HttpReq: c.Request, + tasks := make([]task.TaskExtensionInfo, 0, len(srcPaths)) + for _, srcPath := range srcPaths { + t, e := fs.ArchiveDecompress(c, srcPath, dstDir, model.ArchiveDecompressArgs{ + ArchiveInnerArgs: model.ArchiveInnerArgs{ + ArchiveArgs: model.ArchiveArgs{ + LinkArgs: model.LinkArgs{ + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + }, + Password: req.ArchivePass, }, - Password: req.ArchivePass, + InnerPath: utils.FixAndCleanPath(req.InnerPath), }, - InnerPath: utils.FixAndCleanPath(req.InnerPath), - }, - CacheFull: req.CacheFull, - PutIntoNewDir: req.PutIntoNewDir, - }) - if err != nil { - if errors.Is(err, errs.WrongArchivePassword) { - common.ErrorResp(c, err, 202) - } else { - common.ErrorResp(c, err, 500) + CacheFull: req.CacheFull, + PutIntoNewDir: req.PutIntoNewDir, + }) + if e != nil { + if errors.Is(e, errs.WrongArchivePassword) { + common.ErrorResp(c, e, 202) + } else { + common.ErrorResp(c, e, 500) + } + return + } + if t != nil { + tasks = append(tasks, t) } - return } 
common.SuccessResp(c, gin.H{ - "task": getTaskInfo(t), + "task": getTaskInfos(tasks), }) } @@ -376,7 +403,7 @@ func ArchiveInternalExtract(c *gin.Context) { func ArchiveExtensions(c *gin.Context) { var ext []string for key := range tool.Tools { - ext = append(ext, strings.TrimPrefix(key, ".")) + ext = append(ext, key) } common.SuccessResp(c, ext) } From 5668e4a4ea005690105bf174b6c26b4dec7bb5c4 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Thu, 27 Mar 2025 23:21:42 +0800 Subject: [PATCH 148/187] feat(doubao): add Doubao driver (#8232 closes #8020 #8206) * feat(doubao): implement List() * feat(doubao): implement Link() * feat(doubao): implement MakeDir() * refactor(doubao): add type Object to store key * feat(doubao): implement Move() * feat(doubao): implement Rename() * feat(doubao): implement Remove() --- drivers/all.go | 1 + drivers/doubao/driver.go | 174 +++++++++++++++++++++++++++++++++++++++ drivers/doubao/meta.go | 34 ++++++++ drivers/doubao/types.go | 64 ++++++++++++++ drivers/doubao/util.go | 38 +++++++++ 5 files changed, 311 insertions(+) create mode 100644 drivers/doubao/driver.go create mode 100644 drivers/doubao/meta.go create mode 100644 drivers/doubao/types.go create mode 100644 drivers/doubao/util.go diff --git a/drivers/all.go b/drivers/all.go index 963f0c44..a14e80fb 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -22,6 +22,7 @@ import ( _ "github.com/alist-org/alist/v3/drivers/chaoxing" _ "github.com/alist-org/alist/v3/drivers/cloudreve" _ "github.com/alist-org/alist/v3/drivers/crypt" + _ "github.com/alist-org/alist/v3/drivers/doubao" _ "github.com/alist-org/alist/v3/drivers/dropbox" _ "github.com/alist-org/alist/v3/drivers/febbox" _ "github.com/alist-org/alist/v3/drivers/ftp" diff --git a/drivers/doubao/driver.go b/drivers/doubao/driver.go new file mode 100644 index 00000000..b847ffa9 --- /dev/null +++ b/drivers/doubao/driver.go @@ -0,0 +1,174 @@ +package doubao + +import ( + "context" + "errors" + "time" + + 
"github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/go-resty/resty/v2" + "github.com/google/uuid" +) + +type Doubao struct { + model.Storage + Addition +} + +func (d *Doubao) Config() driver.Config { + return config +} + +func (d *Doubao) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *Doubao) Init(ctx context.Context) error { + // TODO login / refresh token + //op.MustSaveDriverStorage(d) + return nil +} + +func (d *Doubao) Drop(ctx context.Context) error { + return nil +} + +func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + var files []model.Obj + var r NodeInfoResp + _, err := d.request("/samantha/aispace/node_info", "POST", func(req *resty.Request) { + req.SetBody(base.Json{ + "node_id": dir.GetID(), + "need_full_path": false, + }) + }, &r) + if err != nil { + return nil, err + } + + for _, child := range r.Data.Children { + files = append(files, &Object{ + Object: model.Object{ + ID: child.ID, + Path: child.ParentID, + Name: child.Name, + Size: int64(child.Size), + Modified: time.Unix(int64(child.UpdateTime), 0), + Ctime: time.Unix(int64(child.CreateTime), 0), + IsFolder: child.NodeType == 1, + }, + Key: child.Key, + }) + } + return files, nil +} + +func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + if u, ok := file.(*Object); ok { + var r GetFileUrlResp + _, err := d.request("/alice/message/get_file_url", "POST", func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{u.Key}, + "type": "file", + }) + }, &r) + if err != nil { + return nil, err + } + return &model.Link{ + URL: r.Data.FileUrls[0].MainURL, + }, nil + } + return nil, errors.New("can't convert obj to URL") +} + +func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + 
var r UploadNodeResp + _, err := d.request("/samantha/aispace/upload_node", "POST", func(req *resty.Request) { + req.SetBody(base.Json{ + "node_list": []base.Json{ + { + "local_id": uuid.New().String(), + "name": dirName, + "parent_id": parentDir.GetID(), + "node_type": 1, + }, + }, + }) + }, &r) + return err +} + +func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + var r UploadNodeResp + _, err := d.request("/samantha/aispace/move_node", "POST", func(req *resty.Request) { + req.SetBody(base.Json{ + "node_list": []base.Json{ + {"id": srcObj.GetID()}, + }, + "current_parent_id": srcObj.GetPath(), + "target_parent_id": dstDir.GetID(), + }) + }, &r) + return err +} + +func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + var r BaseResp + _, err := d.request("/samantha/aispace/rename_node", "POST", func(req *resty.Request) { + req.SetBody(base.Json{ + "node_id": srcObj.GetID(), + "node_name": newName, + }) + }, &r) + return err +} + +func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + // TODO copy obj, optional + return nil, errs.NotImplement +} + +func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error { + var r BaseResp + _, err := d.request("/samantha/aispace/delete_node", "POST", func(req *resty.Request) { + req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}}) + }, &r) + return err +} + +func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + // TODO upload file, optional + return nil, errs.NotImplement +} + +func (d *Doubao) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Doubao) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) 
([]model.Obj, error) { + // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Doubao) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Doubao) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) { + // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional + // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir + // return errs.NotImplement to use an internal archive tool + return nil, errs.NotImplement +} + +//func (d *Doubao) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { +// return nil, errs.NotSupport +//} + +var _ driver.Driver = (*Doubao)(nil) diff --git a/drivers/doubao/meta.go b/drivers/doubao/meta.go new file mode 100644 index 00000000..bb9e3f25 --- /dev/null +++ b/drivers/doubao/meta.go @@ -0,0 +1,34 @@ +package doubao + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + // Usually one of two + // driver.RootPath + driver.RootID + // define other + Cookie string `json:"cookie" type:"text"` +} + +var config = driver.Config{ + Name: "Doubao", + LocalSort: true, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: true, + NeedMs: false, + DefaultRoot: "0", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &Doubao{} + }) +} diff --git a/drivers/doubao/types.go b/drivers/doubao/types.go new file mode 100644 index 00000000..f9611d86 --- /dev/null +++ 
b/drivers/doubao/types.go @@ -0,0 +1,64 @@ +package doubao + +import "github.com/alist-org/alist/v3/internal/model" + +type BaseResp struct { + Code int `json:"code"` + Msg string `json:"msg"` +} + +type NodeInfoResp struct { + BaseResp + Data struct { + NodeInfo NodeInfo `json:"node_info"` + Children []NodeInfo `json:"children"` + NextCursor string `json:"next_cursor"` + HasMore bool `json:"has_more"` + } `json:"data"` +} + +type NodeInfo struct { + ID string `json:"id"` + Name string `json:"name"` + Key string `json:"key"` + NodeType int `json:"node_type"` // 0: 文件, 1: 文件夹 + Size int `json:"size"` + Source int `json:"source"` + NameReviewStatus int `json:"name_review_status"` + ContentReviewStatus int `json:"content_review_status"` + RiskReviewStatus int `json:"risk_review_status"` + ConversationID string `json:"conversation_id"` + ParentID string `json:"parent_id"` + CreateTime int `json:"create_time"` + UpdateTime int `json:"update_time"` +} + +type GetFileUrlResp struct { + BaseResp + Data struct { + FileUrls []struct { + URI string `json:"uri"` + MainURL string `json:"main_url"` + BackURL string `json:"back_url"` + } `json:"file_urls"` + } `json:"data"` +} + +type UploadNodeResp struct { + BaseResp + Data struct { + NodeList []struct { + LocalID string `json:"local_id"` + ID string `json:"id"` + ParentID string `json:"parent_id"` + Name string `json:"name"` + Key string `json:"key"` + NodeType int `json:"node_type"` // 0: 文件, 1: 文件夹 + } `json:"node_list"` + } `json:"data"` +} + +type Object struct { + model.Object + Key string +} diff --git a/drivers/doubao/util.go b/drivers/doubao/util.go new file mode 100644 index 00000000..977691c0 --- /dev/null +++ b/drivers/doubao/util.go @@ -0,0 +1,38 @@ +package doubao + +import ( + "errors" + + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/pkg/utils" + log "github.com/sirupsen/logrus" +) + +// do others that not defined in Driver interface +func (d *Doubao) request(path string, method 
string, callback base.ReqCallback, resp interface{}) ([]byte, error) { + url := "https://www.doubao.com" + path + req := base.RestyClient.R() + req.SetHeader("Cookie", d.Cookie) + if callback != nil { + callback(req) + } + var r BaseResp + req.SetResult(&r) + res, err := req.Execute(method, url) + log.Debugln(res.String()) + if err != nil { + return nil, err + } + + // 业务状态码检查(优先于HTTP状态码) + if r.Code != 0 { + return res.Body(), errors.New(r.Msg) + } + if resp != nil { + err = utils.Json.Unmarshal(res.Body(), resp) + if err != nil { + return nil, err + } + } + return res.Body(), nil +} From c38dc6df7c9defa23b2aa6826c24ccc43cdc94f6 Mon Sep 17 00:00:00 2001 From: never lee Date: Thu, 27 Mar 2025 23:22:08 +0800 Subject: [PATCH 149/187] fix(115_open): support multipart upload (#8229) Co-authored-by: neverlee --- drivers/115_open/driver.go | 15 +--- drivers/115_open/upload.go | 140 +++++++++++++++++++++++++++++++++++++ 2 files changed, 141 insertions(+), 14 deletions(-) create mode 100644 drivers/115_open/upload.go diff --git a/drivers/115_open/driver.go b/drivers/115_open/driver.go index 00337c0b..0eb943ac 100644 --- a/drivers/115_open/driver.go +++ b/drivers/115_open/driver.go @@ -2,7 +2,6 @@ package _115_open import ( "context" - "encoding/base64" "fmt" "io" "net/http" @@ -16,7 +15,6 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/utils" - "github.com/aliyun/aliyun-oss-go-sdk/oss" sdk "github.com/xhofe/115-sdk-go" ) @@ -265,18 +263,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre return err } // 4. 
upload - ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken)) - if err != nil { - return err - } - bucket, err := ossClient.Bucket(resp.Bucket) - if err != nil { - return err - } - err = bucket.PutObject(resp.Object, tempF, - oss.Callback(base64.StdEncoding.EncodeToString([]byte(resp.Callback.Value.Callback))), - oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(resp.Callback.Value.CallbackVar))), - ) + err = d.multpartUpload(ctx, tempF, file, up, tokenResp, resp) if err != nil { return err } diff --git a/drivers/115_open/upload.go b/drivers/115_open/upload.go new file mode 100644 index 00000000..282582ef --- /dev/null +++ b/drivers/115_open/upload.go @@ -0,0 +1,140 @@ +package _115_open + +import ( + "context" + "encoding/base64" + "io" + "time" + + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/avast/retry-go" + sdk "github.com/xhofe/115-sdk-go" +) + +func calPartSize(fileSize int64) int64 { + var partSize int64 = 20 * utils.MB + if fileSize > partSize { + if fileSize > 1*utils.TB { // file Size over 1TB + partSize = 5 * utils.GB // file part size 5GB + } else if fileSize > 768*utils.GB { // over 768GB + partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part + } else if fileSize > 512*utils.GB { // over 512GB + partSize = 82463373 // ≈ 78.6432MB + } else if fileSize > 384*utils.GB { // over 384GB + partSize = 54975582 // ≈ 52.4288MB + } else if fileSize > 256*utils.GB { // over 256GB + partSize = 41231687 // ≈ 39.3216MB + } else if fileSize > 128*utils.GB { // over 128GB + partSize = 27487791 // ≈ 26.2144MB + } + } + return partSize +} + +func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error { + ossClient, err := 
oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken)) + if err != nil { + return err + } + bucket, err := ossClient.Bucket(initResp.Bucket) + if err != nil { + return err + } + + err = bucket.PutObject(initResp.Object, tempF, + oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))), + oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))), + ) + + return err +} + +// type CallbackResult struct { +// State bool `json:"state"` +// Code int `json:"code"` +// Message string `json:"message"` +// Data struct { +// PickCode string `json:"pick_code"` +// FileName string `json:"file_name"` +// FileSize int64 `json:"file_size"` +// FileID string `json:"file_id"` +// ThumbURL string `json:"thumb_url"` +// Sha1 string `json:"sha1"` +// Aid int `json:"aid"` +// Cid string `json:"cid"` +// } `json:"data"` +// } + +func (d *Open115) multpartUpload(ctx context.Context, tempF model.File, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error { + fileSize := stream.GetSize() + chunkSize := calPartSize(fileSize) + + ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken)) + if err != nil { + return err + } + bucket, err := ossClient.Bucket(initResp.Bucket) + if err != nil { + return err + } + + imur, err := bucket.InitiateMultipartUpload(initResp.Object, oss.Sequential()) + if err != nil { + return err + } + + partNum := (stream.GetSize() + chunkSize - 1) / chunkSize + parts := make([]oss.UploadPart, partNum) + offset := int64(0) + for i := int64(1); i <= partNum; i++ { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + + partSize := chunkSize + if i == partNum { + partSize = fileSize - (i-1)*chunkSize + } + rd := utils.NewMultiReadable(io.LimitReader(stream, partSize)) + err = 
retry.Do(func() error { + _ = rd.Reset() + rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd) + part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i)) + if err != nil { + return err + } + parts[i-1] = part + return nil + }, + retry.Attempts(3), + retry.DelayType(retry.BackOffDelay), + retry.Delay(time.Second)) + if err != nil { + return err + } + + if i == partNum { + offset = fileSize + } else { + offset += partSize + } + up(float64(offset) / float64(fileSize)) + } + + // callbackRespBytes := make([]byte, 1024) + _, err = bucket.CompleteMultipartUpload( + imur, + parts, + oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))), + oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))), + // oss.CallbackResult(&callbackRespBytes), + ) + if err != nil { + return err + } + + return nil +} From 7b62dcb88c5af31cc3bf5f2cd97032d64d14b43f Mon Sep 17 00:00:00 2001 From: Ljcbaby <46277145+ljcbaby@users.noreply.github.com> Date: Thu, 27 Mar 2025 23:22:55 +0800 Subject: [PATCH 150/187] fix(baidu_netdisk): deplicate retry (#8210 redo #7972, link #8180) --- drivers/baidu_netdisk/driver.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 6ea62197..4397d413 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -20,6 +20,7 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/errgroup" "github.com/alist-org/alist/v3/pkg/utils" + "github.com/avast/retry-go" log "github.com/sirupsen/logrus" ) @@ -260,7 +261,10 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F } } // step.2 上传分片 - threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread) + threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread, + retry.Attempts(1), + retry.Delay(time.Second), + 
retry.DelayType(retry.BackOffDelay)) sem := semaphore.NewWeighted(3) for i, partseq := range precreateResp.BlockList { if utils.IsCanceled(upCtx) { From 0cde4e73d614a4bf0b28872391a13d0f8a6d166a Mon Sep 17 00:00:00 2001 From: jerry <109275116+jerry-harm@users.noreply.github.com> Date: Thu, 27 Mar 2025 23:25:23 +0800 Subject: [PATCH 151/187] feat(ipfs): better ipfs support (#8225) * feat: :sparkles: better ipfs support fixed mfs crud, added ipns support * Update driver.go clean up --- drivers/ipfs_api/driver.go | 75 +++++++++++++++++++++++--------------- drivers/ipfs_api/meta.go | 4 +- 2 files changed, 48 insertions(+), 31 deletions(-) diff --git a/drivers/ipfs_api/driver.go b/drivers/ipfs_api/driver.go index 77760656..e59da7ca 100644 --- a/drivers/ipfs_api/driver.go +++ b/drivers/ipfs_api/driver.go @@ -4,13 +4,13 @@ import ( "context" "fmt" "net/url" - stdpath "path" "path/filepath" "strings" + shell "github.com/ipfs/go-ipfs-api" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" - shell "github.com/ipfs/go-ipfs-api" ) type IPFS struct { @@ -44,27 +44,32 @@ func (d *IPFS) Drop(ctx context.Context) error { func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { path := dir.GetPath() - if path[len(path):] != "/" { - path += "/" + switch d.Mode { + case "ipfs": + path, _ = url.JoinPath("/ipfs", path) + case "ipns": + path, _ = url.JoinPath("/ipns", path) + case "mfs": + fileStat, err := d.sh.FilesStat(ctx, path) + if err != nil { + return nil, err + } + path, _ = url.JoinPath("/ipfs", fileStat.Hash) + default: + return nil, fmt.Errorf("mode error") } - path_cid, err := d.sh.FilesStat(ctx, path) - if err != nil { - return nil, err - } - - dirs, err := d.sh.List(path_cid.Hash) + dirs, err := d.sh.List(path) if err != nil { return nil, err } objlist := []model.Obj{} for _, file := range dirs { - gateurl := *d.gateURL - gateurl.Path = "ipfs/" + file.Hash + gateurl := 
*d.gateURL.JoinPath("/ipfs/" + file.Hash) gateurl.RawQuery = "filename=" + url.PathEscape(file.Name) objlist = append(objlist, &model.ObjectURL{ - Object: model.Object{ID: file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1}, + Object: model.Object{ID: "/ipfs/" + file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1}, Url: model.Url{Url: gateurl.String()}, }) } @@ -73,11 +78,15 @@ func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([] } func (d *IPFS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { - link := d.Gateway + "/ipfs/" + file.GetID() + "/?filename=" + url.PathEscape(file.GetName()) - return &model.Link{URL: link}, nil + gateurl := d.gateURL.JoinPath(file.GetID()) + gateurl.RawQuery = "filename=" + url.PathEscape(file.GetName()) + return &model.Link{URL: gateurl.String()}, nil } func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + if d.Mode != "mfs" { + return fmt.Errorf("only write in mfs mode") + } path := parentDir.GetPath() if path[len(path):] != "/" { path += "/" @@ -86,42 +95,48 @@ func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) } func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + if d.Mode != "mfs" { + return fmt.Errorf("only write in mfs mode") + } return d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath()) } func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + if d.Mode != "mfs" { + return fmt.Errorf("only write in mfs mode") + } newFileName := filepath.Dir(srcObj.GetPath()) + "/" + newName return d.sh.FilesMv(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/")) } func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { - // TODO copy obj, optional - fmt.Println(srcObj.GetPath()) - fmt.Println(dstDir.GetPath()) + if d.Mode != "mfs" { + return fmt.Errorf("only write in mfs 
mode") + } newFileName := dstDir.GetPath() + "/" + filepath.Base(srcObj.GetPath()) - fmt.Println(newFileName) return d.sh.FilesCp(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/")) } func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error { - // TODO remove obj, optional + if d.Mode != "mfs" { + return fmt.Errorf("only write in mfs mode") + } return d.sh.FilesRm(ctx, obj.GetPath(), true) } func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { - // TODO upload file, optional - _, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + if d.Mode != "mfs" { + return fmt.Errorf("only write in mfs mode") + } + outHash, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: s, UpdateProgress: up, - }), ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName()))) - return err -} - -func ToFiles(dstDir string) shell.AddOpts { - return func(rb *shell.RequestBuilder) error { - rb.Option("to-files", dstDir) - return nil + })) + if err != nil { + return err } + err = d.sh.FilesCp(ctx, "/ipfs/"+outHash, dstDir.GetPath()+"/"+strings.ReplaceAll(s.GetName(), "\\", "/")) + return err } //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { diff --git a/drivers/ipfs_api/meta.go b/drivers/ipfs_api/meta.go index cdc30424..c145644c 100644 --- a/drivers/ipfs_api/meta.go +++ b/drivers/ipfs_api/meta.go @@ -8,14 +8,16 @@ import ( type Addition struct { // Usually one of two driver.RootPath + Mode string `json:"mode" options:"ipfs,ipns,mfs" type:"select" required:"true"` Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001"` - Gateway string `json:"gateway" default:"https://ipfs.io"` + Gateway string `json:"gateway" default:"http://127.0.0.1:8080"` } var config = driver.Config{ Name: "IPFS API", DefaultRoot: "/", LocalSort: true, + OnlyProxy: false, } func init() { From 
e4bd223d1c0eb376fc812bbe3427614e48271a40 Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Thu, 3 Apr 2025 20:29:53 +0800 Subject: [PATCH 152/187] fix(deps): update 115-sdk-go to v0.1.5 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5ed8a27b..f8a238f1 100644 --- a/go.mod +++ b/go.mod @@ -111,7 +111,7 @@ require ( github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect github.com/therootcompany/xz v1.0.1 // indirect github.com/ulikunitz/xz v0.5.12 // indirect - github.com/xhofe/115-sdk-go v0.1.4 + github.com/xhofe/115-sdk-go v0.1.5 github.com/yuin/goldmark v1.7.8 go4.org v0.0.0-20230225012048-214862532bf5 resty.dev/v3 v3.0.0-beta.2 // indirect diff --git a/go.sum b/go.sum index bf98a8cd..1681a3a0 100644 --- a/go.sum +++ b/go.sum @@ -606,8 +606,8 @@ github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXo github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xhofe/115-sdk-go v0.1.4 h1:erIWuWH+kZQOEHM+YZK8Y6sWQ2s/SFJIFh/WeCtjiiY= -github.com/xhofe/115-sdk-go v0.1.4/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U= +github.com/xhofe/115-sdk-go v0.1.5 h1:2+E92l6AX0+ABAkrdmDa9PE5ONN7wVLCaKkK80zETOg= +github.com/xhofe/115-sdk-go v0.1.5/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI= github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0= github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM= From 37640221c05adb5f09bb06a53840253b7c0abbb2 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Thu, 3 Apr 2025 20:34:27 +0800 Subject: [PATCH 
153/187] fix(doubao): update file size type to int64 (#8289) --- drivers/doubao/driver.go | 6 +++--- drivers/doubao/types.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/doubao/driver.go b/drivers/doubao/driver.go index b847ffa9..04f74325 100644 --- a/drivers/doubao/driver.go +++ b/drivers/doubao/driver.go @@ -55,9 +55,9 @@ func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ( ID: child.ID, Path: child.ParentID, Name: child.Name, - Size: int64(child.Size), - Modified: time.Unix(int64(child.UpdateTime), 0), - Ctime: time.Unix(int64(child.CreateTime), 0), + Size: child.Size, + Modified: time.Unix(child.UpdateTime, 0), + Ctime: time.Unix(child.CreateTime, 0), IsFolder: child.NodeType == 1, }, Key: child.Key, diff --git a/drivers/doubao/types.go b/drivers/doubao/types.go index f9611d86..2dc5a61d 100644 --- a/drivers/doubao/types.go +++ b/drivers/doubao/types.go @@ -22,15 +22,15 @@ type NodeInfo struct { Name string `json:"name"` Key string `json:"key"` NodeType int `json:"node_type"` // 0: 文件, 1: 文件夹 - Size int `json:"size"` + Size int64 `json:"size"` Source int `json:"source"` NameReviewStatus int `json:"name_review_status"` ContentReviewStatus int `json:"content_review_status"` RiskReviewStatus int `json:"risk_review_status"` ConversationID string `json:"conversation_id"` ParentID string `json:"parent_id"` - CreateTime int `json:"create_time"` - UpdateTime int `json:"update_time"` + CreateTime int64 `json:"create_time"` + UpdateTime int64 `json:"update_time"` } type GetFileUrlResp struct { From affd0cecd1a131d78a5d3e695ecaeb8d98397cb5 Mon Sep 17 00:00:00 2001 From: YangXu <47767754+Three-taile-dragon@users.noreply.github.com> Date: Thu, 3 Apr 2025 20:35:14 +0800 Subject: [PATCH 154/187] fix(pikpak&pikpak_share): update algorithms (#8278) --- drivers/pikpak/driver.go | 2 +- drivers/pikpak/util.go | 56 ++++++++++++++++------------------ drivers/pikpak_share/driver.go | 2 +- drivers/pikpak_share/util.go | 56 
++++++++++++++++------------------ 4 files changed, 56 insertions(+), 60 deletions(-) diff --git a/drivers/pikpak/driver.go b/drivers/pikpak/driver.go index 504b1d0e..6c64e6fb 100644 --- a/drivers/pikpak/driver.go +++ b/drivers/pikpak/driver.go @@ -69,7 +69,7 @@ func (d *PikPak) Init(ctx context.Context) (err error) { d.ClientVersion = PCClientVersion d.PackageName = PCPackageName d.Algorithms = PCAlgorithms - d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.5.6.4831 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36" + d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.6.11.4955 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36" } if d.Addition.CaptchaToken != "" && d.Addition.RefreshToken == "" { diff --git a/drivers/pikpak/util.go b/drivers/pikpak/util.go index f2594e78..61396aa4 100644 --- a/drivers/pikpak/util.go +++ b/drivers/pikpak/util.go @@ -28,34 +28,32 @@ import ( ) var AndroidAlgorithms = []string{ - "7xOq4Z8s", - "QE9/9+IQco", - "WdX5J9CPLZp", - "NmQ5qFAXqH3w984cYhMeC5TJR8j", - "cc44M+l7GDhav", - "KxGjo/wHB+Yx8Lf7kMP+/m9I+", - "wla81BUVSmDkctHDpUT", - "c6wMr1sm1WxiR3i8LDAm3W", - "hRLrEQCFNYi0PFPV", - "o1J41zIraDtJPNuhBu7Ifb/q3", - "U", - "RrbZvV0CTu3gaZJ56PVKki4IeP", - "NNuRbLckJqUp1Do0YlrKCUP", - "UUwnBbipMTvInA0U0E9", - "VzGc", + "SOP04dGzk0TNO7t7t9ekDbAmx+eq0OI1ovEx", + "nVBjhYiND4hZ2NCGyV5beamIr7k6ifAsAbl", + "Ddjpt5B/Cit6EDq2a6cXgxY9lkEIOw4yC1GDF28KrA", + "VVCogcmSNIVvgV6U+AochorydiSymi68YVNGiz", + "u5ujk5sM62gpJOsB/1Gu/zsfgfZO", + "dXYIiBOAHZgzSruaQ2Nhrqc2im", + "z5jUTBSIpBN9g4qSJGlidNAutX6", + "KJE2oveZ34du/g1tiimm", } var WebAlgorithms = []string{ - "fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr", - "uSUX02HYJ1IkyLdhINEFcCf7l2", - "iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41", - "3binT1s/5a1pu3fGsN", - "8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5", - "DYS3StqnAEKdGddRP8CJrxUSFh", - "crquW+4", - 
"ryKqvW9B9hly+JAymXCIfag5Z", - "Hr08T/NDTX1oSJfHk90c", - "i", + "C9qPpZLN8ucRTaTiUMWYS9cQvWOE", + "+r6CQVxjzJV6LCV", + "F", + "pFJRC", + "9WXYIDGrwTCz2OiVlgZa90qpECPD6olt", + "/750aCr4lm/Sly/c", + "RB+DT/gZCrbV", + "", + "CyLsf7hdkIRxRm215hl", + "7xHvLi2tOYP0Y92b", + "ZGTXXxu8E/MIWaEDB+Sm/", + "1UI3", + "E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO", + "ihtqpG6FMt65+Xk+tWUH2", + "NhXXU9rg4XXdzo7u5o", } var PCAlgorithms = []string{ @@ -80,17 +78,17 @@ const ( const ( AndroidClientID = "YNxT9w7GMdWvEOKa" AndroidClientSecret = "dbw2OtmVEeuUvIptb1Coyg" - AndroidClientVersion = "1.49.3" + AndroidClientVersion = "1.53.2" AndroidPackageName = "com.pikcloud.pikpak" - AndroidSdkVersion = "2.0.4.204101" + AndroidSdkVersion = "2.0.6.206003" WebClientID = "YUMx5nI8ZU8Ap8pm" WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg" - WebClientVersion = "undefined" + WebClientVersion = "2.0.0" WebPackageName = "drive.mypikpak.com" WebSdkVersion = "8.0.3" PCClientID = "YvtoWO6GNHiuCl7x" PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA" - PCClientVersion = "undefined" // 2.5.6.4831 + PCClientVersion = "undefined" // 2.6.11.4955 PCPackageName = "mypikpak.com" PCSdkVersion = "8.0.3" ) diff --git a/drivers/pikpak_share/driver.go b/drivers/pikpak_share/driver.go index d527a1ab..d6341bd9 100644 --- a/drivers/pikpak_share/driver.go +++ b/drivers/pikpak_share/driver.go @@ -66,7 +66,7 @@ func (d *PikPakShare) Init(ctx context.Context) error { d.ClientVersion = PCClientVersion d.PackageName = PCPackageName d.Algorithms = PCAlgorithms - d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.5.6.4831 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36" + d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.6.11.4955 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36" } // 获取CaptchaToken diff --git a/drivers/pikpak_share/util.go b/drivers/pikpak_share/util.go index 172a6148..4111779f 100644 --- 
a/drivers/pikpak_share/util.go +++ b/drivers/pikpak_share/util.go @@ -17,34 +17,32 @@ import ( ) var AndroidAlgorithms = []string{ - "7xOq4Z8s", - "QE9/9+IQco", - "WdX5J9CPLZp", - "NmQ5qFAXqH3w984cYhMeC5TJR8j", - "cc44M+l7GDhav", - "KxGjo/wHB+Yx8Lf7kMP+/m9I+", - "wla81BUVSmDkctHDpUT", - "c6wMr1sm1WxiR3i8LDAm3W", - "hRLrEQCFNYi0PFPV", - "o1J41zIraDtJPNuhBu7Ifb/q3", - "U", - "RrbZvV0CTu3gaZJ56PVKki4IeP", - "NNuRbLckJqUp1Do0YlrKCUP", - "UUwnBbipMTvInA0U0E9", - "VzGc", + "SOP04dGzk0TNO7t7t9ekDbAmx+eq0OI1ovEx", + "nVBjhYiND4hZ2NCGyV5beamIr7k6ifAsAbl", + "Ddjpt5B/Cit6EDq2a6cXgxY9lkEIOw4yC1GDF28KrA", + "VVCogcmSNIVvgV6U+AochorydiSymi68YVNGiz", + "u5ujk5sM62gpJOsB/1Gu/zsfgfZO", + "dXYIiBOAHZgzSruaQ2Nhrqc2im", + "z5jUTBSIpBN9g4qSJGlidNAutX6", + "KJE2oveZ34du/g1tiimm", } var WebAlgorithms = []string{ - "fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr", - "uSUX02HYJ1IkyLdhINEFcCf7l2", - "iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41", - "3binT1s/5a1pu3fGsN", - "8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5", - "DYS3StqnAEKdGddRP8CJrxUSFh", - "crquW+4", - "ryKqvW9B9hly+JAymXCIfag5Z", - "Hr08T/NDTX1oSJfHk90c", - "i", + "C9qPpZLN8ucRTaTiUMWYS9cQvWOE", + "+r6CQVxjzJV6LCV", + "F", + "pFJRC", + "9WXYIDGrwTCz2OiVlgZa90qpECPD6olt", + "/750aCr4lm/Sly/c", + "RB+DT/gZCrbV", + "", + "CyLsf7hdkIRxRm215hl", + "7xHvLi2tOYP0Y92b", + "ZGTXXxu8E/MIWaEDB+Sm/", + "1UI3", + "E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO", + "ihtqpG6FMt65+Xk+tWUH2", + "NhXXU9rg4XXdzo7u5o", } var PCAlgorithms = []string{ @@ -63,17 +61,17 @@ var PCAlgorithms = []string{ const ( AndroidClientID = "YNxT9w7GMdWvEOKa" AndroidClientSecret = "dbw2OtmVEeuUvIptb1Coyg" - AndroidClientVersion = "1.49.3" + AndroidClientVersion = "1.53.2" AndroidPackageName = "com.pikcloud.pikpak" - AndroidSdkVersion = "2.0.4.204101" + AndroidSdkVersion = "2.0.6.206003" WebClientID = "YUMx5nI8ZU8Ap8pm" WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg" - WebClientVersion = "undefined" + WebClientVersion = "2.0.0" WebPackageName = "drive.mypikpak.com" WebSdkVersion 
= "8.0.3" PCClientID = "YvtoWO6GNHiuCl7x" PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA" - PCClientVersion = "undefined" // 2.5.6.4831 + PCClientVersion = "undefined" // 2.6.11.4955 PCPackageName = "mypikpak.com" PCSdkVersion = "8.0.3" ) From a6304285b6271633d5c881ebff4472f93fc19e33 Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Thu, 3 Apr 2025 20:35:52 +0800 Subject: [PATCH 155/187] fix: revert "refactor(net): pass request header" (#8269) https://github.com/AlistGo/alist/pull/8031/commits/5be50e77d9ad8d67e343aa7e9380bcdd2506ae8f --- internal/net/serve.go | 2 +- internal/stream/util.go | 12 ++---------- server/common/proxy.go | 4 ++-- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/internal/net/serve.go b/internal/net/serve.go index 8b6b3d1d..63e1cb45 100644 --- a/internal/net/serve.go +++ b/internal/net/serve.go @@ -114,7 +114,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time // 使用请求的Context // 不然从sendContent读不到数据,即使请求断开CopyBuffer也会一直堵塞 - ctx := context.WithValue(r.Context(), "request_header", &r.Header) + ctx := context.WithValue(r.Context(), "request_header", r.Header) switch { case len(ranges) == 0: reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1}) diff --git a/internal/stream/util.go b/internal/stream/util.go index b2c76754..01019482 100644 --- a/internal/stream/util.go +++ b/internal/stream/util.go @@ -19,11 +19,7 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl } rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) { if link.Concurrency != 0 || link.PartSize != 0 { - requestHeader := ctx.Value("request_header") - if requestHeader == nil { - requestHeader = &http.Header{} - } - header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header) + header := net.ProcessHeader(nil, link.Header) down := net.NewDownloader(func(d *net.Downloader) { d.Concurrency = 
link.Concurrency d.PartSize = link.PartSize @@ -64,11 +60,7 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl } func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int64) (*http.Response, error) { - requestHeader := ctx.Value("request_header") - if requestHeader == nil { - requestHeader = &http.Header{} - } - header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header) + header := net.ProcessHeader(nil, link.Header) header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header) return net.RequestHttp(ctx, "GET", header, link.URL) diff --git a/server/common/proxy.go b/server/common/proxy.go index c14af6fa..f9e1e4bb 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -50,9 +50,9 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { requestHeader := ctx.Value("request_header") if requestHeader == nil { - requestHeader = &http.Header{} + requestHeader = http.Header{} } - header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header) + header := net.ProcessHeader(requestHeader.(http.Header), link.Header) down := net.NewDownloader(func(d *net.Downloader) { d.Concurrency = link.Concurrency d.PartSize = link.PartSize From 465dd1703deda982cfff7c1fce4047932f78108e Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Thu, 3 Apr 2025 20:40:19 +0800 Subject: [PATCH 156/187] feat(cloudreve): s3 policy support (#8245) * feat(cloudreve): s3 policy support * fix(cloudreve): correct potential off-by-one error in `etags` initialization --- drivers/cloudreve/driver.go | 2 + drivers/cloudreve/types.go | 11 +++--- drivers/cloudreve/util.go | 79 +++++++++++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+), 5 deletions(-) diff --git a/drivers/cloudreve/driver.go b/drivers/cloudreve/driver.go index d0ab30b6..8c2321b8 100644 --- 
a/drivers/cloudreve/driver.go +++ b/drivers/cloudreve/driver.go @@ -162,6 +162,8 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File switch r.Policy.Type { case "onedrive": err = d.upOneDrive(ctx, stream, u, up) + case "s3": + err = d.upS3(ctx, stream, u, up) case "remote": // 从机存储 err = d.upRemote(ctx, stream, u, up) case "local": // 本机存储 diff --git a/drivers/cloudreve/types.go b/drivers/cloudreve/types.go index a7c3919e..8a465f01 100644 --- a/drivers/cloudreve/types.go +++ b/drivers/cloudreve/types.go @@ -21,11 +21,12 @@ type Policy struct { } type UploadInfo struct { - SessionID string `json:"sessionID"` - ChunkSize int `json:"chunkSize"` - Expires int `json:"expires"` - UploadURLs []string `json:"uploadURLs"` - Credential string `json:"credential,omitempty"` + SessionID string `json:"sessionID"` + ChunkSize int `json:"chunkSize"` + Expires int `json:"expires"` + UploadURLs []string `json:"uploadURLs"` + Credential string `json:"credential,omitempty"` // local + CompleteURL string `json:"completeURL,omitempty"` // s3 } type DirectoryResp struct { diff --git a/drivers/cloudreve/util.go b/drivers/cloudreve/util.go index cffa7988..1fd5ed8a 100644 --- a/drivers/cloudreve/util.go +++ b/drivers/cloudreve/util.go @@ -312,3 +312,82 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u } return nil } + +func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error { + var finish int64 = 0 + var chunk int = 0 + var etags []string + DEFAULT := int64(u.ChunkSize) + for finish < stream.GetSize() { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + utils.Log.Debugf("[Cloudreve-S3] upload: %d", finish) + var byteSize = DEFAULT + left := stream.GetSize() - finish + if left < DEFAULT { + byteSize = left + } + byteData := make([]byte, byteSize) + n, err := io.ReadFull(stream, byteData) + utils.Log.Debug(err, n) + if err != nil { + return err + } + req, err := 
http.NewRequest("PUT", u.UploadURLs[chunk], + driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) + if err != nil { + return err + } + req = req.WithContext(ctx) + req.ContentLength = byteSize + finish += byteSize + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + _ = res.Body.Close() + etags = append(etags, res.Header.Get("ETag")) + up(float64(finish) * 100 / float64(stream.GetSize())) + chunk++ + } + + // s3LikeFinishUpload + // https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252 + bodyBuilder := &strings.Builder{} + bodyBuilder.WriteString("<CompleteMultipartUpload>") + for i, etag := range etags { + bodyBuilder.WriteString(fmt.Sprintf( + `<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`, + i+1, // PartNumber 从 1 开始 + etag, + )) + } + bodyBuilder.WriteString("</CompleteMultipartUpload>") + req, err := http.NewRequest( + "POST", + u.CompleteURL, + strings.NewReader(bodyBuilder.String()), + ) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/xml") + req.Header.Set("User-Agent", d.getUA()) + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body)) + } + + // 上传成功发送回调请求 + err = d.request(http.MethodGet, "/callback/s3/"+u.SessionID, nil, nil) + if err != nil { + return err + } + return nil +} From 31c55a2adf6f4dc9f972cf779019be8f2ea95ddb Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Thu, 3 Apr 2025 20:41:05 +0800 Subject: [PATCH 157/187] fix(archive): unable to preview (#8248) * fix(archive): unable to preview * fix bug --- internal/archive/tool/helper.go | 47 ++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/internal/archive/tool/helper.go b/internal/archive/tool/helper.go index 8f71900a..20da3446 100644 ---
a/internal/archive/tool/helper.go +++ b/internal/archive/tool/helper.go @@ -29,7 +29,6 @@ type ArchiveReader interface { func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree) { encrypted := false dirMap := make(map[string]*model.ObjectTree) - dirMap["."] = &model.ObjectTree{} for _, file := range r.Files() { if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() { encrypted = true @@ -44,7 +43,7 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree dir = stdpath.Dir(name) dirObj = dirMap[dir] if dirObj == nil { - isNewFolder = true + isNewFolder = dir != "." dirObj = &model.ObjectTree{} dirObj.IsFolder = true dirObj.Name = stdpath.Base(dir) @@ -60,41 +59,45 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree dir = strings.TrimSuffix(name, "/") dirObj = dirMap[dir] if dirObj == nil { - isNewFolder = true + isNewFolder = dir != "." dirObj = &model.ObjectTree{} dirMap[dir] = dirObj } dirObj.IsFolder = true dirObj.Name = stdpath.Base(dir) dirObj.Modified = file.FileInfo().ModTime() - dirObj.Children = make([]model.ObjTree, 0) } if isNewFolder { // 将 文件夹 添加到 父文件夹 - dir = stdpath.Dir(dir) - pDirObj := dirMap[dir] - if pDirObj != nil { - pDirObj.Children = append(pDirObj.Children, dirObj) - continue - } - + // 考虑压缩包仅记录文件的路径,不记录文件夹 + // 循环创建所有父文件夹 + parentDir := stdpath.Dir(dir) for { - // 考虑压缩包仅记录文件的路径,不记录文件夹 - pDirObj = &model.ObjectTree{} - pDirObj.IsFolder = true - pDirObj.Name = stdpath.Base(dir) - pDirObj.Modified = file.FileInfo().ModTime() - dirMap[dir] = pDirObj - pDirObj.Children = append(pDirObj.Children, dirObj) - dir = stdpath.Dir(dir) - if dirMap[dir] != nil { + parentDirObj := dirMap[parentDir] + if parentDirObj == nil { + parentDirObj = &model.ObjectTree{} + if parentDir != "." 
{ + parentDirObj.IsFolder = true + parentDirObj.Name = stdpath.Base(parentDir) + parentDirObj.Modified = file.FileInfo().ModTime() + } + dirMap[parentDir] = parentDirObj + } + parentDirObj.Children = append(parentDirObj.Children, dirObj) + + parentDir = stdpath.Dir(parentDir) + if dirMap[parentDir] != nil { break } - dirObj = pDirObj + dirObj = parentDirObj } } } - return encrypted, dirMap["."].GetChildren() + if len(dirMap) > 0 { + return encrypted, dirMap["."].GetChildren() + } else { + return encrypted, nil + } } func MakeModelObj(file os.FileInfo) *model.Object { From af18cb138bce30fb5d927b30fa80fb13f182fea1 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Thu, 3 Apr 2025 20:41:59 +0800 Subject: [PATCH 158/187] feat(139): add option ReportRealSize (#8244 close #8141) * feat(139): handle family upload errors * feat(139): add option `ReportRealSize` * Update drivers/139/driver.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- drivers/139/driver.go | 34 +++++++++++++++++++++++++++------- drivers/139/meta.go | 1 + drivers/139/types.go | 7 +++++++ 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/drivers/139/driver.go b/drivers/139/driver.go index c6b30335..f367c431 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -3,6 +3,7 @@ package _139 import ( "context" "encoding/base64" + "encoding/xml" "fmt" "io" "net/http" @@ -740,14 +741,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr break } } + var reportSize int64 + if d.ReportRealSize { + reportSize = stream.GetSize() + } else { + reportSize = 0 + } data := base.Json{ "manualRename": 2, "operation": 0, "fileCount": 1, - "totalSize": 0, // 去除上传大小限制 + "totalSize": reportSize, "uploadContentList": []base.Json{{ "contentName": stream.GetName(), - "contentSize": 0, // 去除上传大小限制 + "contentSize": reportSize, // "digest": "5a3231986ce7a6b46e408612d385bafa" 
}}, "parentCatalogID": dstDir.GetID(), @@ -765,10 +772,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr "operation": 0, "path": path.Join(dstDir.GetPath(), dstDir.GetID()), "seqNo": random.String(32), //序列号不能为空 - "totalSize": 0, + "totalSize": reportSize, "uploadContentList": []base.Json{{ "contentName": stream.GetName(), - "contentSize": 0, + "contentSize": reportSize, // "digest": "5a3231986ce7a6b46e408612d385bafa" }}, }) @@ -779,6 +786,9 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr if err != nil { return err } + if resp.Data.Result.ResultCode != "0" { + return fmt.Errorf("get file upload url failed with result code: %s, message: %s", resp.Data.Result.ResultCode, resp.Data.Result.ResultDesc) + } // Progress p := driver.NewProgress(stream.GetSize(), up) @@ -820,13 +830,23 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr if err != nil { return err } - _ = res.Body.Close() - log.Debugf("%+v", res) if res.StatusCode != http.StatusOK { + res.Body.Close() return fmt.Errorf("unexpected status code: %d", res.StatusCode) } + bodyBytes, err := io.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("error reading response body: %v", err) + } + var result InterLayerUploadResult + err = xml.Unmarshal(bodyBytes, &result) + if err != nil { + return fmt.Errorf("error parsing XML: %v", err) + } + if result.ResultCode != 0 { + return fmt.Errorf("upload failed with result code: %d, message: %s", result.ResultCode, result.Msg) + } } - return nil default: return errs.NotImplement diff --git a/drivers/139/meta.go b/drivers/139/meta.go index d80b8566..866aadb4 100644 --- a/drivers/139/meta.go +++ b/drivers/139/meta.go @@ -12,6 +12,7 @@ type Addition struct { Type string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"` CloudID string `json:"cloud_id"` CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" 
default:"0" help:"0 for auto"` + ReportRealSize bool `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"` } var config = driver.Config{ diff --git a/drivers/139/types.go b/drivers/139/types.go index ac7079d8..50ae1f81 100644 --- a/drivers/139/types.go +++ b/drivers/139/types.go @@ -143,6 +143,13 @@ type UploadResp struct { } `json:"data"` } +type InterLayerUploadResult struct { + XMLName xml.Name `xml:"result"` + Text string `xml:",chardata"` + ResultCode int `xml:"resultCode"` + Msg string `xml:"msg"` +} + type CloudContent struct { ContentID string `json:"contentID"` //Modifier string `json:"modifier"` From 2e21df066105c078f3a7d435ab8232eae644172a Mon Sep 17 00:00:00 2001 From: New Future Date: Thu, 3 Apr 2025 20:43:21 +0800 Subject: [PATCH 159/187] feat(driver): add Azure Blob Storage driver (#8261) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add azure-blob driver * fix nested folders copy * feat(driver): add Azure Blob Storage driver 实现 Azure Blob Storage 驱动,支持以下功能: - 使用共享密钥身份验证初始化连接 - 列出目录和文件 - 生成临时 SAS URL 进行文件访问 - 创建目录 - 移动和重命名文件/文件夹 - 复制文件/文件夹 - 删除文件/文件夹 - 上传文件并支持进度跟踪 此驱动允许用户通过 AList 平台无缝访问和管理 Azure Blob Storage 中的数据。 * feat(driver): update help doc for Azure Blob * doc(readme): add new driver * Update drivers/azure_blob/driver.go fix(azure): fix name check Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update README.md doc(readme): fix the link Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fix(azure): fix log and link --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- README.md | 1 + drivers/all.go | 1 + drivers/azure_blob/driver.go | 313 +++++++++++++++++++++++++++ drivers/azure_blob/meta.go | 27 +++ drivers/azure_blob/types.go | 20 ++ drivers/azure_blob/util.go | 401 +++++++++++++++++++++++++++++++++++ go.mod | 6 + go.sum | 6 + 8 files changed, 775 insertions(+) create 
mode 100644 drivers/azure_blob/driver.go create mode 100644 drivers/azure_blob/meta.go create mode 100644 drivers/azure_blob/types.go create mode 100644 drivers/azure_blob/util.go diff --git a/README.md b/README.md index d1189188..1261839e 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing - [x] [Dropbox](https://www.dropbox.com/) - [x] [FeijiPan](https://www.feijipan.com/) - [x] [dogecloud](https://www.dogecloud.com/product/oss) + - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs) - [x] Easy to deploy and out-of-the-box - [x] File preview (PDF, markdown, code, plain text, ...) - [x] Image preview in gallery mode diff --git a/drivers/all.go b/drivers/all.go index a14e80fb..083d01dc 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -16,6 +16,7 @@ import ( _ "github.com/alist-org/alist/v3/drivers/aliyundrive" _ "github.com/alist-org/alist/v3/drivers/aliyundrive_open" _ "github.com/alist-org/alist/v3/drivers/aliyundrive_share" + _ "github.com/alist-org/alist/v3/drivers/azure_blob" _ "github.com/alist-org/alist/v3/drivers/baidu_netdisk" _ "github.com/alist-org/alist/v3/drivers/baidu_photo" _ "github.com/alist-org/alist/v3/drivers/baidu_share" diff --git a/drivers/azure_blob/driver.go b/drivers/azure_blob/driver.go new file mode 100644 index 00000000..6836533a --- /dev/null +++ b/drivers/azure_blob/driver.go @@ -0,0 +1,313 @@ +package azure_blob + +import ( + "context" + "fmt" + "io" + "path" + "regexp" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" +) +// Azure Blob Storage based on the blob APIs +// Link: 
https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api +type AzureBlob struct { + model.Storage + Addition + client *azblob.Client + containerClient *container.Client + config driver.Config +} + +// Config returns the driver configuration. +func (d *AzureBlob) Config() driver.Config { + return d.config +} + +// GetAddition returns additional settings specific to Azure Blob Storage. +func (d *AzureBlob) GetAddition() driver.Additional { + return &d.Addition +} + +// Init initializes the Azure Blob Storage client using shared key authentication. +func (d *AzureBlob) Init(ctx context.Context) error { + // Validate the endpoint URL + accountName := extractAccountName(d.Addition.Endpoint) + if !regexp.MustCompile(`^[a-z0-9]+$`).MatchString(accountName) { + return fmt.Errorf("invalid storage account name: must be chars of lowercase letters or numbers only") + } + + credential, err := azblob.NewSharedKeyCredential(accountName, d.Addition.AccessKey) + if err != nil { + return fmt.Errorf("failed to create credential: %w", err) + } + + // Check if Endpoint is just account name + endpoint := d.Addition.Endpoint + if accountName == endpoint { + endpoint = fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) + } + // Initialize Azure Blob client with retry policy + client, err := azblob.NewClientWithSharedKeyCredential(endpoint, credential, + &azblob.ClientOptions{ClientOptions: azcore.ClientOptions{ + Retry: policy.RetryOptions{ + MaxRetries: MaxRetries, + RetryDelay: RetryDelay, + }, + }}) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + d.client = client + + // Ensure container exists or create it + containerName := strings.Trim(d.Addition.ContainerName, "/ \\") + if containerName == "" { + return fmt.Errorf("container name cannot be empty") + } + return d.createContainerIfNotExists(ctx, containerName) +} + +// Drop releases resources associated with the Azure Blob client. 
+func (d *AzureBlob) Drop(ctx context.Context) error { + d.client = nil + return nil +} + +// List retrieves blobs and directories under the specified path. +func (d *AzureBlob) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + prefix := ensureTrailingSlash(dir.GetPath()) + + pager := d.containerClient.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{ + Prefix: &prefix, + }) + + var objs []model.Obj + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list blobs: %w", err) + } + + // Process directories + for _, blobPrefix := range page.Segment.BlobPrefixes { + objs = append(objs, &model.Object{ + Name: path.Base(strings.TrimSuffix(*blobPrefix.Name, "/")), + Path: *blobPrefix.Name, + Modified: *blobPrefix.Properties.LastModified, + Ctime: *blobPrefix.Properties.CreationTime, + IsFolder: true, + }) + } + + // Process files + for _, blob := range page.Segment.BlobItems { + if strings.HasSuffix(*blob.Name, "/") { + continue + } + objs = append(objs, &model.Object{ + Name: path.Base(*blob.Name), + Path: *blob.Name, + Size: *blob.Properties.ContentLength, + Modified: *blob.Properties.LastModified, + Ctime: *blob.Properties.CreationTime, + IsFolder: false, + }) + } + } + return objs, nil +} + +// Link generates a temporary SAS URL for accessing a blob. +func (d *AzureBlob) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + blobClient := d.containerClient.NewBlobClient(file.GetPath()) + expireDuration := time.Hour * time.Duration(d.SignURLExpire) + + sasURL, err := blobClient.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil) + if err != nil { + return nil, fmt.Errorf("failed to generate SAS URL: %w", err) + } + return &model.Link{URL: sasURL}, nil +} + +// MakeDir creates a virtual directory by uploading an empty blob as a marker. 
+func (d *AzureBlob) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + dirPath := path.Join(parentDir.GetPath(), dirName) + if err := d.mkDir(ctx, dirPath); err != nil { + return nil, fmt.Errorf("failed to create directory marker: %w", err) + } + + return &model.Object{ + Path: dirPath, + Name: dirName, + IsFolder: true, + }, nil +} + +// Move relocates an object (file or directory) to a new directory. +func (d *AzureBlob) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + srcPath := srcObj.GetPath() + dstPath := path.Join(dstDir.GetPath(), srcObj.GetName()) + + if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil { + return nil, fmt.Errorf("move operation failed: %w", err) + } + + return &model.Object{ + Path: dstPath, + Name: srcObj.GetName(), + Modified: time.Now(), + IsFolder: srcObj.IsDir(), + Size: srcObj.GetSize(), + }, nil +} + +// Rename changes the name of an existing object. +func (d *AzureBlob) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + srcPath := srcObj.GetPath() + dstPath := path.Join(path.Dir(srcPath), newName) + + if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil { + return nil, fmt.Errorf("rename operation failed: %w", err) + } + + return &model.Object{ + Path: dstPath, + Name: newName, + Modified: time.Now(), + IsFolder: srcObj.IsDir(), + Size: srcObj.GetSize(), + }, nil +} + +// Copy duplicates an object (file or directory) to a specified destination directory. 
+func (d *AzureBlob) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + dstPath := path.Join(dstDir.GetPath(), srcObj.GetName()) + + // Handle directory copying using flat listing + if srcObj.IsDir() { + srcPrefix := srcObj.GetPath() + srcPrefix = ensureTrailingSlash(srcPrefix) + + // Get all blobs under the source directory + blobs, err := d.flattenListBlobs(ctx, srcPrefix) + if err != nil { + return nil, fmt.Errorf("failed to list source directory contents: %w", err) + } + + // Process each blob - copy to destination + for _, blob := range blobs { + // Skip the directory marker itself + if *blob.Name == srcPrefix { + continue + } + + // Calculate relative path from source + relPath := strings.TrimPrefix(*blob.Name, srcPrefix) + itemDstPath := path.Join(dstPath, relPath) + + if strings.HasSuffix(itemDstPath, "/") || (blob.Metadata["hdi_isfolder"] != nil && *blob.Metadata["hdi_isfolder"] == "true") { + // Create directory marker at destination + err := d.mkDir(ctx, itemDstPath) + if err != nil { + return nil, fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err) + } + } else { + // Copy the blob + if err := d.copyFile(ctx, *blob.Name, itemDstPath); err != nil { + return nil, fmt.Errorf("failed to copy %s: %w", *blob.Name, err) + } + } + + } + + // Create directory marker at destination if needed + if len(blobs) == 0 { + err := d.mkDir(ctx, dstPath) + if err != nil { + return nil, fmt.Errorf("failed to create directory [%s]: %w", dstPath, err) + } + } + + return &model.Object{ + Path: dstPath, + Name: srcObj.GetName(), + Modified: time.Now(), + IsFolder: true, + }, nil + } + + // Copy a single file + if err := d.copyFile(ctx, srcObj.GetPath(), dstPath); err != nil { + return nil, fmt.Errorf("failed to copy blob: %w", err) + } + return &model.Object{ + Path: dstPath, + Name: srcObj.GetName(), + Size: srcObj.GetSize(), + Modified: time.Now(), + IsFolder: false, + }, nil +} + +// Remove deletes a specified blob or 
recursively deletes a directory and its contents. +func (d *AzureBlob) Remove(ctx context.Context, obj model.Obj) error { + path := obj.GetPath() + + // Handle recursive directory deletion + if obj.IsDir() { + return d.deleteFolder(ctx, path) + } + + // Delete single file + return d.deleteFile(ctx, path, false) +} + +// Put uploads a file stream to Azure Blob Storage with progress tracking. +func (d *AzureBlob) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + blobPath := path.Join(dstDir.GetPath(), stream.GetName()) + blobClient := d.containerClient.NewBlockBlobClient(blobPath) + + // Determine optimal upload options based on file size + options := optimizedUploadOptions(stream.GetSize()) + + // Track upload progress + progressTracker := &progressTracker{ + total: stream.GetSize(), + updateProgress: up, + } + + // Wrap stream to handle context cancellation and progress tracking + limitedStream := driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, progressTracker)) + + // Upload the stream to Azure Blob Storage + _, err := blobClient.UploadStream(ctx, limitedStream, options) + if err != nil { + return nil, fmt.Errorf("failed to upload file: %w", err) + } + + return &model.Object{ + Path: blobPath, + Name: stream.GetName(), + Size: stream.GetSize(), + Modified: time.Now(), + IsFolder: false, + }, nil +} + +// The following methods related to archive handling are not implemented yet. +// func (d *AzureBlob) GetArchiveMeta(...) {...} +// func (d *AzureBlob) ListArchive(...) {...} +// func (d *AzureBlob) Extract(...) {...} +// func (d *AzureBlob) ArchiveDecompress(...) {...} + +// Ensure AzureBlob implements the driver.Driver interface. 
+var _ driver.Driver = (*AzureBlob)(nil) diff --git a/drivers/azure_blob/meta.go b/drivers/azure_blob/meta.go new file mode 100644 index 00000000..8e42bdd6 --- /dev/null +++ b/drivers/azure_blob/meta.go @@ -0,0 +1,27 @@ +package azure_blob + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + Endpoint string `json:"endpoint" required:"true" default:"https://.blob.core.windows.net/" help:"e.g. https://accountname.blob.core.windows.net/. The full endpoint URL for Azure Storage, including the unique storage account name (3 ~ 24 numbers and lowercase letters only)."` + AccessKey string `json:"access_key" required:"true" help:"The access key for Azure Storage, used for authentication. https://learn.microsoft.com/azure/storage/common/storage-account-keys-manage"` + ContainerName string `json:"container_name" required:"true" help:"The name of the container in Azure Storage (created in the Azure portal). https://learn.microsoft.com/azure/storage/blobs/blob-containers-portal"` + SignURLExpire int `json:"sign_url_expire" type:"number" default:"4" help:"The expiration time for SAS URLs, in hours."` +} + +var config = driver.Config{ + Name: "Azure Blob Storage", + LocalSort: true, + CheckStatus: true, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &AzureBlob{ + config: config, + } + }) +} diff --git a/drivers/azure_blob/types.go b/drivers/azure_blob/types.go new file mode 100644 index 00000000..01323e51 --- /dev/null +++ b/drivers/azure_blob/types.go @@ -0,0 +1,20 @@ +package azure_blob + +import "github.com/alist-org/alist/v3/internal/driver" + +// progressTracker is used to track upload progress +type progressTracker struct { + total int64 + current int64 + updateProgress driver.UpdateProgress +} + +// Write implements io.Writer to track progress +func (pt *progressTracker) Write(p []byte) (n int, err error) { + n = len(p) + pt.current += int64(n) + if 
pt.updateProgress != nil && pt.total > 0 { + pt.updateProgress(float64(pt.current) * 100 / float64(pt.total)) + } + return n, nil +} diff --git a/drivers/azure_blob/util.go b/drivers/azure_blob/util.go new file mode 100644 index 00000000..2adf3a0f --- /dev/null +++ b/drivers/azure_blob/util.go @@ -0,0 +1,401 @@ +package azure_blob + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "path" + "sort" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + log "github.com/sirupsen/logrus" +) + +const ( + // MaxRetries defines the maximum number of retry attempts for Azure operations + MaxRetries = 3 + // RetryDelay defines the base delay between retries + RetryDelay = 3 * time.Second + // MaxBatchSize defines the maximum number of operations in a single batch request + MaxBatchSize = 128 +) + +// extractAccountName 从 Azure 存储 Endpoint 中提取账户名 +func extractAccountName(endpoint string) string { + // 移除协议前缀 + endpoint = strings.TrimPrefix(endpoint, "https://") + endpoint = strings.TrimPrefix(endpoint, "http://") + + // 获取第一个点之前的部分(即账户名) + parts := strings.Split(endpoint, ".") + if len(parts) > 0 { + // to lower case + return strings.ToLower(parts[0]) + } + return "" +} + +// isNotFoundError checks if the error is a "not found" type error +func isNotFoundError(err error) bool { + var storageErr *azcore.ResponseError + if errors.As(err, &storageErr) { + return storageErr.StatusCode == 404 + } + // Fallback to string matching for backwards compatibility + return err != nil && strings.Contains(err.Error(), "BlobNotFound") +} + +// flattenListBlobs - Optimize blob listing to handle 
pagination better +func (d *AzureBlob) flattenListBlobs(ctx context.Context, prefix string) ([]container.BlobItem, error) { + // Standardize prefix format + prefix = ensureTrailingSlash(prefix) + + var blobItems []container.BlobItem + pager := d.containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{ + Prefix: &prefix, + Include: container.ListBlobsInclude{ + Metadata: true, + }, + }) + + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list blobs: %w", err) + } + + for _, blob := range page.Segment.BlobItems { + blobItems = append(blobItems, *blob) + } + } + + return blobItems, nil +} + +// batchDeleteBlobs - Simplify batch deletion logic +func (d *AzureBlob) batchDeleteBlobs(ctx context.Context, blobPaths []string) error { + if len(blobPaths) == 0 { + return nil + } + + // Process in batches of MaxBatchSize + for i := 0; i < len(blobPaths); i += MaxBatchSize { + end := min(i+MaxBatchSize, len(blobPaths)) + currentBatch := blobPaths[i:end] + + // Create batch builder + batchBuilder, err := d.containerClient.NewBatchBuilder() + if err != nil { + return fmt.Errorf("failed to create batch builder: %w", err) + } + + // Add delete operations + for _, blobPath := range currentBatch { + if err := batchBuilder.Delete(blobPath, nil); err != nil { + return fmt.Errorf("failed to add delete operation for %s: %w", blobPath, err) + } + } + + // Submit batch + responses, err := d.containerClient.SubmitBatch(ctx, batchBuilder, nil) + if err != nil { + return fmt.Errorf("batch delete request failed: %w", err) + } + + // Check responses + for _, resp := range responses.Responses { + if resp.Error != nil && !isNotFoundError(resp.Error) { + // 获取 blob 名称以提供更好的错误信息 + blobName := "unknown" + if resp.BlobName != nil { + blobName = *resp.BlobName + } + return fmt.Errorf("failed to delete blob %s: %v", blobName, resp.Error) + } + } + } + + return nil +} + +// deleteFolder recursively deletes a directory and all its 
contents +func (d *AzureBlob) deleteFolder(ctx context.Context, prefix string) error { + // Ensure directory path ends with slash + prefix = ensureTrailingSlash(prefix) + + // Get all blobs under the directory using flattenListBlobs + globs, err := d.flattenListBlobs(ctx, prefix) + if err != nil { + return fmt.Errorf("failed to list blobs for deletion: %w", err) + } + + // If there are blobs in the directory, delete them + if len(globs) > 0 { + // 分离文件和目录标记 + var filePaths []string + var dirPaths []string + + for _, blob := range globs { + blobName := *blob.Name + if isDirectory(blob) { + // remove trailing slash for directory names + dirPaths = append(dirPaths, strings.TrimSuffix(blobName, "/")) + } else { + filePaths = append(filePaths, blobName) + } + } + + // 先删除文件,再删除目录 + if len(filePaths) > 0 { + if err := d.batchDeleteBlobs(ctx, filePaths); err != nil { + return err + } + } + if len(dirPaths) > 0 { + // 按路径深度分组 + depthMap := make(map[int][]string) + for _, dir := range dirPaths { + depth := strings.Count(dir, "/") // 计算目录深度 + depthMap[depth] = append(depthMap[depth], dir) + } + + // 按深度从大到小排序 + var depths []int + for depth := range depthMap { + depths = append(depths, depth) + } + sort.Sort(sort.Reverse(sort.IntSlice(depths))) + + // 按深度逐层批量删除 + for _, depth := range depths { + batch := depthMap[depth] + if err := d.batchDeleteBlobs(ctx, batch); err != nil { + return err + } + } + } + } + + // 最后删除目录标记本身 + return d.deleteEmptyDirectory(ctx, prefix) +} + +// deleteFile deletes a single file or blob with better error handling +func (d *AzureBlob) deleteFile(ctx context.Context, path string, isDir bool) error { + blobClient := d.containerClient.NewBlobClient(path) + _, err := blobClient.Delete(ctx, nil) + if err != nil && !(isDir && isNotFoundError(err)) { + return err + } + return nil +} + +// copyFile copies a single blob from source path to destination path +func (d *AzureBlob) copyFile(ctx context.Context, srcPath, dstPath string) error { + srcBlob := 
d.containerClient.NewBlobClient(srcPath) + dstBlob := d.containerClient.NewBlobClient(dstPath) + + // Use configured expiration time for SAS URL + expireDuration := time.Hour * time.Duration(d.SignURLExpire) + srcURL, err := srcBlob.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil) + if err != nil { + return fmt.Errorf("failed to generate source SAS URL: %w", err) + } + + _, err = dstBlob.StartCopyFromURL(ctx, srcURL, nil) + return err + +} + +// createContainerIfNotExists - Create container if not exists +// Clean up commented code +func (d *AzureBlob) createContainerIfNotExists(ctx context.Context, containerName string) error { + serviceClient := d.client.ServiceClient() + containerClient := serviceClient.NewContainerClient(containerName) + + var options = service.CreateContainerOptions{} + _, err := containerClient.Create(ctx, &options) + if err != nil { + var responseErr *azcore.ResponseError + if errors.As(err, &responseErr) && responseErr.ErrorCode != "ContainerAlreadyExists" { + return fmt.Errorf("failed to create or access container [%s]: %w", containerName, err) + } + } + + d.containerClient = containerClient + return nil +} + +// mkDir creates a virtual directory marker by uploading an empty blob with metadata. +func (d *AzureBlob) mkDir(ctx context.Context, fullDirName string) error { + dirPath := ensureTrailingSlash(fullDirName) + blobClient := d.containerClient.NewBlockBlobClient(dirPath) + + // Upload an empty blob with metadata indicating it's a directory + _, err := blobClient.Upload(ctx, struct { + *bytes.Reader + io.Closer + }{ + Reader: bytes.NewReader([]byte{}), + Closer: io.NopCloser(nil), + }, &blockblob.UploadOptions{ + Metadata: map[string]*string{ + "hdi_isfolder": to.Ptr("true"), + }, + }) + return err +} + +// ensureTrailingSlash ensures the provided path ends with a trailing slash. 
+func ensureTrailingSlash(path string) string { + if !strings.HasSuffix(path, "/") { + return path + "/" + } + return path +} + +// moveOrRename moves or renames blobs or directories from source to destination. +func (d *AzureBlob) moveOrRename(ctx context.Context, srcPath, dstPath string, isDir bool, srcSize int64) error { + if isDir { + // Normalize paths for directory operations + srcPath = ensureTrailingSlash(srcPath) + dstPath = ensureTrailingSlash(dstPath) + + // List all blobs under the source directory + blobs, err := d.flattenListBlobs(ctx, srcPath) + if err != nil { + return fmt.Errorf("failed to list blobs: %w", err) + } + + // Iterate and copy each blob to the destination + for _, item := range blobs { + srcBlobName := *item.Name + relPath := strings.TrimPrefix(srcBlobName, srcPath) + itemDstPath := path.Join(dstPath, relPath) + + if isDirectory(item) { + // Create directory marker at destination + if err := d.mkDir(ctx, itemDstPath); err != nil { + return fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err) + } + } else { + // Copy file blob to destination + if err := d.copyFile(ctx, srcBlobName, itemDstPath); err != nil { + return fmt.Errorf("failed to copy blob [%s]: %w", srcBlobName, err) + } + } + } + + // Handle empty directories by creating a marker at destination + if len(blobs) == 0 { + if err := d.mkDir(ctx, dstPath); err != nil { + return fmt.Errorf("failed to create directory [%s]: %w", dstPath, err) + } + } + + // Delete source directory and its contents + if err := d.deleteFolder(ctx, srcPath); err != nil { + log.Warnf("failed to delete source directory [%s]: %v\n, and try again", srcPath, err) + // Retry deletion once more and ignore the result + if err := d.deleteFolder(ctx, srcPath); err != nil { + log.Errorf("Retry deletion of source directory [%s] failed: %v", srcPath, err) + } + } + + return nil + } + + // Single file move or rename operation + if err := d.copyFile(ctx, srcPath, dstPath); err != nil { + return 
fmt.Errorf("failed to copy file: %w", err) + } + + // Delete source file after successful copy + if err := d.deleteFile(ctx, srcPath, false); err != nil { + log.Errorf("Error deleting source file [%s]: %v", srcPath, err) + } + return nil +} + +// optimizedUploadOptions returns the optimal upload options based on file size +func optimizedUploadOptions(fileSize int64) *azblob.UploadStreamOptions { + options := &azblob.UploadStreamOptions{ + BlockSize: 4 * 1024 * 1024, // 4MB block size + Concurrency: 4, // Default concurrency + } + + // For large files, increase block size and concurrency + if fileSize > 256*1024*1024 { // For files larger than 256MB + options.BlockSize = 8 * 1024 * 1024 // 8MB blocks + options.Concurrency = 8 // More concurrent uploads + } + + // For very large files (>1GB) + if fileSize > 1024*1024*1024 { + options.BlockSize = 16 * 1024 * 1024 // 16MB blocks + options.Concurrency = 16 // Higher concurrency + } + + return options +} + +// isDirectory determines if a blob represents a directory +// Checks multiple indicators: path suffix, metadata, and content type +func isDirectory(blob container.BlobItem) bool { + // Check path suffix + if strings.HasSuffix(*blob.Name, "/") { + return true + } + + // Check metadata for directory marker + if blob.Metadata != nil { + if val, ok := blob.Metadata["hdi_isfolder"]; ok && val != nil && *val == "true" { + return true + } + // Azure Storage Explorer and other tools may use different metadata keys + if val, ok := blob.Metadata["is_directory"]; ok && val != nil && strings.ToLower(*val) == "true" { + return true + } + } + + // Check content type (some tools mark directories with specific content types) + if blob.Properties != nil && blob.Properties.ContentType != nil { + contentType := strings.ToLower(*blob.Properties.ContentType) + if blob.Properties.ContentLength != nil && *blob.Properties.ContentLength == 0 && (contentType == "application/directory" || contentType == "directory") { + return true + } + } + + 
return false +} + +// deleteEmptyDirectory deletes a directory only if it's empty +func (d *AzureBlob) deleteEmptyDirectory(ctx context.Context, dirPath string) error { + // Directory is empty, delete the directory marker + blobClient := d.containerClient.NewBlobClient(strings.TrimSuffix(dirPath, "/")) + _, err := blobClient.Delete(ctx, nil) + + // Also try deleting with trailing slash (for different directory marker formats) + if err != nil && isNotFoundError(err) { + blobClient = d.containerClient.NewBlobClient(dirPath) + _, err = blobClient.Delete(ctx, nil) + } + + // Ignore not found errors + if err != nil && isNotFoundError(err) { + log.Infof("Directory [%s] not found during deletion: %v", dirPath, err) + return nil + } + + return err +} diff --git a/go.mod b/go.mod index f8a238f1..97a477d3 100644 --- a/go.mod +++ b/go.mod @@ -79,6 +79,12 @@ require ( gorm.io/gorm v1.25.11 ) +require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 // indirect +) + require ( github.com/STARRY-S/zip v0.2.1 // indirect github.com/aymerick/douceur v0.2.0 // indirect diff --git a/go.sum b/go.sum index 1681a3a0..86fb779e 100644 --- a/go.sum +++ b/go.sum @@ -19,6 +19,12 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 
h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= From ab68faef444a04f60247ee93be4442ec84104769 Mon Sep 17 00:00:00 2001 From: asdfghjkl <61342682+anobodys@users.noreply.github.com> Date: Thu, 3 Apr 2025 20:44:49 +0800 Subject: [PATCH 160/187] fix(baidu_netdisk): add another video crack api (#8275) Co-authored-by: anobodys --- drivers/baidu_netdisk/driver.go | 2 ++ drivers/baidu_netdisk/meta.go | 3 ++- drivers/baidu_netdisk/types.go | 2 +- drivers/baidu_netdisk/util.go | 45 ++++++++++++++++++++++++++++++++- 4 files changed, 49 insertions(+), 3 deletions(-) diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 4397d413..3cc1ae9e 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -78,6 +78,8 @@ func (d *BaiduNetdisk) List(ctx context.Context, dir model.Obj, args model.ListA func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { if d.DownloadAPI == "crack" { return d.linkCrack(file, args) + } else if d.DownloadAPI == "crack_video" { + return d.linkCrackVideo(file, args) } return d.linkOfficial(file, args) } diff --git a/drivers/baidu_netdisk/meta.go b/drivers/baidu_netdisk/meta.go index e9226a0d..27571056 100644 --- a/drivers/baidu_netdisk/meta.go +++ b/drivers/baidu_netdisk/meta.go @@ -10,7 +10,7 @@ type Addition struct { driver.RootPath 
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"` OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"` - DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"` + DownloadAPI string `json:"download_api" type:"select" options:"official,crack,crack_video" default:"official"` ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"` ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"` CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"` @@ -19,6 +19,7 @@ type Addition struct { UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"` CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"` LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"` + OnlyListVideoFile bool `json:"only_list_video_file" default:"false"` } var config = driver.Config{ diff --git a/drivers/baidu_netdisk/types.go b/drivers/baidu_netdisk/types.go index 728273b8..ed9b09df 100644 --- a/drivers/baidu_netdisk/types.go +++ b/drivers/baidu_netdisk/types.go @@ -17,7 +17,7 @@ type TokenErrResp struct { type File struct { //TkbindId int `json:"tkbind_id"` //OwnerType int `json:"owner_type"` - //Category int `json:"category"` + Category int `json:"category"` //RealCategory string `json:"real_category"` FsId int64 `json:"fs_id"` //OperId int `json:"oper_id"` diff --git a/drivers/baidu_netdisk/util.go b/drivers/baidu_netdisk/util.go index a4fc13f8..1249b3f4 100644 --- a/drivers/baidu_netdisk/util.go +++ b/drivers/baidu_netdisk/util.go @@ -79,6 +79,12 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall return retry.Unrecoverable(err2) } } + + if 31023 == errno && d.DownloadAPI == "crack_video" { + result = res.Body() + return nil + } + return fmt.Errorf("req: [%s] 
,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno) } result = res.Body() @@ -131,7 +137,16 @@ func (d *BaiduNetdisk) getFiles(dir string) ([]File, error) { if len(resp.List) == 0 { break } - res = append(res, resp.List...) + + if d.OnlyListVideoFile { + for _, file := range resp.List { + if file.Isdir == 1 || file.Category == 1 { + res = append(res, file) + } + } + } else { + res = append(res, resp.List...) + } } return res, nil } @@ -187,6 +202,34 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link, }, nil } +func (d *BaiduNetdisk) linkCrackVideo(file model.Obj, _ model.LinkArgs) (*model.Link, error) { + param := map[string]string{ + "type": "VideoURL", + "path": fmt.Sprintf("%s", file.GetPath()), + "fs_id": file.GetID(), + "devuid": "0%1", + "clienttype": "1", + "channel": "android_15_25010PN30C_bd-netdisk_1523a", + "nom3u8": "1", + "dlink": "1", + "media": "1", + "origin": "dlna", + } + resp, err := d.request("https://pan.baidu.com/api/mediainfo", http.MethodGet, func(req *resty.Request) { + req.SetQueryParams(param) + }, nil) + if err != nil { + return nil, err + } + + return &model.Link{ + URL: utils.Json.Get(resp, "info", "dlink").ToString(), + Header: http.Header{ + "User-Agent": []string{d.CustomCrackUA}, + }, + }, nil +} + func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) { params := map[string]string{ "method": "filemanager", From 3375c26c413ff31190a59fab5e40696348c099e9 Mon Sep 17 00:00:00 2001 From: xiaoQQya <46475319+xiaoQQya@users.noreply.github.com> Date: Thu, 3 Apr 2025 20:50:29 +0800 Subject: [PATCH 161/187] perf(quark_uc&quark_uc_tv): native proxy multithreading (#8287) * perf(quark_uc): native proxy multithreading * perf(quark_uc_tv): native proxy multithreading * chore(fs): file query result add id --- drivers/quark_uc/driver.go | 2 +- drivers/quark_uc_tv/driver.go | 9 ++++++--- server/handles/fsread.go | 6 ++++++ 3 files changed, 13 insertions(+), 4 deletions(-) diff --git 
a/drivers/quark_uc/driver.go b/drivers/quark_uc/driver.go index 04757b1b..0f8884fa 100644 --- a/drivers/quark_uc/driver.go +++ b/drivers/quark_uc/driver.go @@ -74,7 +74,7 @@ func (d *QuarkOrUC) Link(ctx context.Context, file model.Obj, args model.LinkArg "Referer": []string{d.conf.referer}, "User-Agent": []string{ua}, }, - Concurrency: 2, + Concurrency: 3, PartSize: 10 * utils.MB, }, nil } diff --git a/drivers/quark_uc_tv/driver.go b/drivers/quark_uc_tv/driver.go index ff7ccf20..a857e2dd 100644 --- a/drivers/quark_uc_tv/driver.go +++ b/drivers/quark_uc_tv/driver.go @@ -125,7 +125,6 @@ func (d *QuarkUCTV) List(ctx context.Context, dir model.Obj, args model.ListArgs } func (d *QuarkUCTV) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { - files := &model.Link{} var fileLink FileLink _, err := d.request(ctx, "/file", "GET", func(req *resty.Request) { req.SetQueryParams(map[string]string{ @@ -139,8 +138,12 @@ func (d *QuarkUCTV) Link(ctx context.Context, file model.Obj, args model.LinkArg if err != nil { return nil, err } - files.URL = fileLink.Data.DownloadURL - return files, nil + + return &model.Link{ + URL: fileLink.Data.DownloadURL, + Concurrency: 3, + PartSize: 10 * utils.MB, + }, nil } func (d *QuarkUCTV) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { diff --git a/server/handles/fsread.go b/server/handles/fsread.go index 0a62f1ff..73bde23b 100644 --- a/server/handles/fsread.go +++ b/server/handles/fsread.go @@ -33,6 +33,8 @@ type DirReq struct { } type ObjResp struct { + Id string `json:"id"` + Path string `json:"path"` Name string `json:"name"` Size int64 `json:"size"` IsDir bool `json:"is_dir"` @@ -210,6 +212,8 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp { for _, obj := range objs { thumb, _ := model.GetThumb(obj) resp = append(resp, ObjResp{ + Id: obj.GetID(), + Path: obj.GetPath(), Name: obj.GetName(), Size: obj.GetSize(), IsDir: obj.IsDir(), @@ -326,6 
+330,8 @@ func FsGet(c *gin.Context) { thumb, _ := model.GetThumb(obj) common.SuccessResp(c, FsGetResp{ ObjResp: ObjResp{ + Id: obj.GetID(), + Path: obj.GetPath(), Name: obj.GetName(), Size: obj.GetSize(), IsDir: obj.IsDir(), From ddffacf07b8a63a3e065bdac7c6ea1b2ec63bdc4 Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Sat, 12 Apr 2025 16:55:31 +0800 Subject: [PATCH 162/187] perf: optimize IO read/write usage (#8243) * perf: optimize IO read/write usage * . * Update drivers/139/driver.go Co-authored-by: MadDogOwner --------- Co-authored-by: MadDogOwner --- drivers/115/util.go | 2 +- drivers/123/driver.go | 21 +----- drivers/123/upload.go | 34 +++++---- drivers/139/driver.go | 106 +++++++++++++---------------- drivers/139/util.go | 1 + drivers/189pc/utils.go | 106 ++++++++++++++++++----------- drivers/aliyundrive_open/upload.go | 24 +++---- drivers/baidu_netdisk/driver.go | 46 ++++++++++--- drivers/baidu_photo/driver.go | 50 +++++++++++--- drivers/cloudreve/util.go | 6 +- drivers/github/util.go | 3 +- drivers/ilanzou/driver.go | 27 +++----- drivers/mopan/driver.go | 3 - drivers/netease_music/util.go | 1 - drivers/onedrive/util.go | 2 +- drivers/onedrive_app/util.go | 2 +- drivers/pikpak/util.go | 16 ++--- drivers/quark_uc/driver.go | 73 ++++++++++---------- drivers/thunder/driver.go | 14 ++-- drivers/thunder_browser/driver.go | 21 +++--- drivers/thunderx/driver.go | 19 +++--- internal/archive/archives/utils.go | 3 +- internal/archive/iso9660/utils.go | 13 ++-- internal/fs/archive.go | 8 ++- internal/model/obj.go | 4 +- internal/net/request.go | 3 +- internal/stream/stream.go | 87 ++++++++++------------- internal/stream/util.go | 43 ++++++++++++ server/handles/fsup.go | 30 ++++---- 29 files changed, 427 insertions(+), 341 deletions(-) diff --git a/drivers/115/util.go b/drivers/115/util.go index 7298f565..fc17fe3c 100644 --- a/drivers/115/util.go +++ b/drivers/115/util.go @@ -405,7 +405,7 @@ func (d *Pan115) 
UploadByMultipart(ctx context.Context, params *driver115.Upload if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) { continue } - if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf)), + if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)), chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil { break } diff --git a/drivers/123/driver.go b/drivers/123/driver.go index 7d457138..32c053e2 100644 --- a/drivers/123/driver.go +++ b/drivers/123/driver.go @@ -2,11 +2,8 @@ package _123 import ( "context" - "crypto/md5" "encoding/base64" - "encoding/hex" "fmt" - "io" "net/http" "net/url" "sync" @@ -18,6 +15,7 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" @@ -187,25 +185,12 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error { func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { etag := file.GetHash().GetHash(utils.MD5) + var err error if len(etag) < utils.MD5.Width { - // const DEFAULT int64 = 10485760 - h := md5.New() - // need to calculate md5 of the full content - tempFile, err := file.CacheFullInTempFile() + _, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5) if err != nil { return err } - defer func() { - _ = tempFile.Close() - }() - if _, err = utils.CopyWithBuffer(h, tempFile); err != nil { - return err - } - _, err = tempFile.Seek(0, io.SeekStart) - if err != nil { - return err - } - etag = hex.EncodeToString(h.Sum(nil)) } data := base.Json{ "driveId": 0, diff --git a/drivers/123/upload.go b/drivers/123/upload.go index dc148c4c..b0482a9f 100644 --- 
a/drivers/123/upload.go +++ b/drivers/123/upload.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "math" "net/http" "strconv" @@ -70,27 +69,33 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F } func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error { - chunkSize := int64(1024 * 1024 * 16) + tmpF, err := file.CacheFullInTempFile() + if err != nil { + return err + } // fetch s3 pre signed urls - chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize))) + size := file.GetSize() + chunkSize := min(size, 16*utils.MB) + chunkCount := int(size / chunkSize) + lastChunkSize := size % chunkSize + if lastChunkSize > 0 { + chunkCount++ + } else { + lastChunkSize = chunkSize + } // only 1 batch is allowed - isMultipart := chunkCount > 1 batchSize := 1 getS3UploadUrl := d.getS3Auth - if isMultipart { + if chunkCount > 1 { batchSize = 10 getS3UploadUrl = d.getS3PreSignedUrls } - limited := driver.NewLimitedUploadStream(ctx, file) for i := 1; i <= chunkCount; i += batchSize { if utils.IsCanceled(ctx) { return ctx.Err() } start := i - end := i + batchSize - if end > chunkCount+1 { - end = chunkCount + 1 - } + end := min(i+batchSize, chunkCount+1) s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end) if err != nil { return err @@ -102,9 +107,9 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi } curSize := chunkSize if j == chunkCount { - curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize + curSize = lastChunkSize } - err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(limited, chunkSize), curSize, false, getS3UploadUrl) + err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl) if err != nil { return err } @@ -115,12 +120,12 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file 
model.Fi return d.completeS3(ctx, upReq, file, chunkCount > 1) } -func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error { +func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error { uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)] if uploadUrl == "" { return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls) } - req, err := http.NewRequest("PUT", uploadUrl, reader) + req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader)) if err != nil { return err } @@ -143,6 +148,7 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign } s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls // retry + reader.Seek(0, io.SeekStart) return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl) } if res.StatusCode != http.StatusOK { diff --git a/drivers/139/driver.go b/drivers/139/driver.go index f367c431..0af5a4f7 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -2,20 +2,19 @@ package _139 import ( "context" - "encoding/base64" "encoding/xml" "fmt" "io" "net/http" "path" "strconv" - "strings" "time" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + streamPkg "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/cron" "github.com/alist-org/alist/v3/pkg/utils" 
"github.com/alist-org/alist/v3/pkg/utils/random" @@ -72,28 +71,29 @@ func (d *Yun139) Init(ctx context.Context) error { default: return errs.NotImplement } - if d.ref != nil { - return nil - } - decode, err := base64.StdEncoding.DecodeString(d.Authorization) - if err != nil { - return err - } - decodeStr := string(decode) - splits := strings.Split(decodeStr, ":") - if len(splits) < 2 { - return fmt.Errorf("authorization is invalid, splits < 2") - } - d.Account = splits[1] - _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{ - "qryUserExternInfoReq": base.Json{ - "commonAccountInfo": base.Json{ - "account": d.getAccount(), - "accountType": 1, - }, - }, - }, nil) - return err + // if d.ref != nil { + // return nil + // } + // decode, err := base64.StdEncoding.DecodeString(d.Authorization) + // if err != nil { + // return err + // } + // decodeStr := string(decode) + // splits := strings.Split(decodeStr, ":") + // if len(splits) < 2 { + // return fmt.Errorf("authorization is invalid, splits < 2") + // } + // d.Account = splits[1] + // _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{ + // "qryUserExternInfoReq": base.Json{ + // "commonAccountInfo": base.Json{ + // "account": d.getAccount(), + // "accountType": 1, + // }, + // }, + // }, nil) + // return err + return nil } func (d *Yun139) InitReference(storage driver.Driver) error { @@ -503,23 +503,15 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error { } } -const ( - _ = iota //ignore first value by assigning to blank identifier - KB = 1 << (10 * iota) - MB - GB - TB -) - func (d *Yun139) getPartSize(size int64) int64 { if d.CustomUploadPartSize != 0 { return d.CustomUploadPartSize } // 网盘对于分片数量存在上限 - if size/GB > 30 { - return 512 * MB + if size/utils.GB > 30 { + return 512 * utils.MB } - return 100 * MB + return 100 * utils.MB } func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up 
driver.UpdateProgress) error { @@ -527,29 +519,28 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr case MetaPersonalNew: var err error fullHash := stream.GetHash().GetHash(utils.SHA256) - if len(fullHash) <= 0 { - tmpF, err := stream.CacheFullInTempFile() - if err != nil { - return err - } - fullHash, err = utils.HashFile(utils.SHA256, tmpF) + if len(fullHash) != utils.SHA256.Width { + _, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA256) if err != nil { return err } } - partInfos := []PartInfo{} - var partSize = d.getPartSize(stream.GetSize()) - part := (stream.GetSize() + partSize - 1) / partSize - if part == 0 { + size := stream.GetSize() + var partSize = d.getPartSize(size) + part := size / partSize + if size%partSize > 0 { + part++ + } else if part == 0 { part = 1 } + partInfos := make([]PartInfo, 0, part) for i := int64(0); i < part; i++ { if utils.IsCanceled(ctx) { return ctx.Err() } start := i * partSize - byteSize := stream.GetSize() - start + byteSize := size - start if byteSize > partSize { byteSize = partSize } @@ -577,7 +568,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr "contentType": "application/octet-stream", "parallelUpload": false, "partInfos": firstPartInfos, - "size": stream.GetSize(), + "size": size, "parentFileId": dstDir.GetID(), "name": stream.GetName(), "type": "file", @@ -630,7 +621,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr } // Progress - p := driver.NewProgress(stream.GetSize(), up) + p := driver.NewProgress(size, up) rateLimited := driver.NewLimitedUploadStream(ctx, stream) // 上传所有分片 @@ -790,12 +781,14 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr return fmt.Errorf("get file upload url failed with result code: %s, message: %s", resp.Data.Result.ResultCode, resp.Data.Result.ResultDesc) } + size := stream.GetSize() // Progress - p := 
driver.NewProgress(stream.GetSize(), up) - - var partSize = d.getPartSize(stream.GetSize()) - part := (stream.GetSize() + partSize - 1) / partSize - if part == 0 { + p := driver.NewProgress(size, up) + var partSize = d.getPartSize(size) + part := size / partSize + if size%partSize > 0 { + part++ + } else if part == 0 { part = 1 } rateLimited := driver.NewLimitedUploadStream(ctx, stream) @@ -805,10 +798,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr } start := i * partSize - byteSize := stream.GetSize() - start - if byteSize > partSize { - byteSize = partSize - } + byteSize := min(size-start, partSize) limitReader := io.LimitReader(rateLimited, byteSize) // Update Progress @@ -820,7 +810,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr req = req.WithContext(ctx) req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName())) - req.Header.Set("contentSize", strconv.FormatInt(stream.GetSize(), 10)) + req.Header.Set("contentSize", strconv.FormatInt(size, 10)) req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1)) req.Header.Set("uploadtaskID", resp.Data.UploadResult.UploadTaskID) req.Header.Set("rangeType", "0") diff --git a/drivers/139/util.go b/drivers/139/util.go index 3e1a61ed..53defef5 100644 --- a/drivers/139/util.go +++ b/drivers/139/util.go @@ -67,6 +67,7 @@ func (d *Yun139) refreshToken() error { if len(splits) < 3 { return fmt.Errorf("authorization is invalid, splits < 3") } + d.Account = splits[1] strs := strings.Split(splits[2], "|") if len(strs) < 4 { return fmt.Errorf("authorization is invalid, strs < 4") diff --git a/drivers/189pc/utils.go b/drivers/189pc/utils.go index fb1a183a..c391f7e6 100644 --- a/drivers/189pc/utils.go +++ b/drivers/189pc/utils.go @@ -3,16 +3,15 @@ package _189pc import ( "bytes" "context" - "crypto/md5" "encoding/base64" "encoding/hex" "encoding/xml" "fmt" "io" - "math" "net/http" "net/http/cookiejar" "net/url" + "os" 
"regexp" "sort" "strconv" @@ -28,6 +27,7 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/errgroup" "github.com/alist-org/alist/v3/pkg/utils" @@ -473,12 +473,8 @@ func (y *Cloud189PC) refreshSession() (err error) { // 普通上传 // 无法上传大小为0的文件 func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) { - var sliceSize = partSize(file.GetSize()) - count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize))) - lastPartSize := file.GetSize() % sliceSize - if file.GetSize() > 0 && lastPartSize == 0 { - lastPartSize = sliceSize - } + size := file.GetSize() + sliceSize := partSize(size) params := Params{ "parentFolderId": dstDir.GetID(), @@ -512,22 +508,29 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo retry.DelayType(retry.BackOffDelay)) sem := semaphore.NewWeighted(3) - fileMd5 := md5.New() - silceMd5 := md5.New() + count := int(size / sliceSize) + lastPartSize := size % sliceSize + if lastPartSize > 0 { + count++ + } else { + lastPartSize = sliceSize + } + fileMd5 := utils.MD5.NewFunc() + silceMd5 := utils.MD5.NewFunc() silceMd5Hexs := make([]string, 0, count) - + teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)) + byteSize := sliceSize for i := 1; i <= count; i++ { if utils.IsCanceled(upCtx) { break } - byteData := make([]byte, sliceSize) if i == count { - byteData = byteData[:lastPartSize] + byteSize = lastPartSize } - + byteData := make([]byte, byteSize) // 读取块 silceMd5.Reset() - if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil { + if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil { sem.Release(1) return nil, err } @@ 
-607,24 +610,43 @@ func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream m // 快传 func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) { - tempFile, err := file.CacheFullInTempFile() - if err != nil { - return nil, err + var ( + cache = file.GetFile() + tmpF *os.File + err error + ) + size := file.GetSize() + if _, ok := cache.(io.ReaderAt); !ok && size > 0 { + tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*") + if err != nil { + return nil, err + } + defer func() { + _ = tmpF.Close() + _ = os.Remove(tmpF.Name()) + }() + cache = tmpF } - - var sliceSize = partSize(file.GetSize()) - count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize))) - lastSliceSize := file.GetSize() % sliceSize - if file.GetSize() > 0 && lastSliceSize == 0 { + sliceSize := partSize(size) + count := int(size / sliceSize) + lastSliceSize := size % sliceSize + if lastSliceSize > 0 { + count++ + } else { lastSliceSize = sliceSize } //step.1 优先计算所需信息 byteSize := sliceSize - fileMd5 := md5.New() - silceMd5 := md5.New() - silceMd5Hexs := make([]string, 0, count) + fileMd5 := utils.MD5.NewFunc() + sliceMd5 := utils.MD5.NewFunc() + sliceMd5Hexs := make([]string, 0, count) partInfos := make([]string, 0, count) + writers := []io.Writer{fileMd5, sliceMd5} + if tmpF != nil { + writers = append(writers, tmpF) + } + written := int64(0) for i := 1; i <= count; i++ { if utils.IsCanceled(ctx) { return nil, ctx.Err() @@ -634,19 +656,31 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode byteSize = lastSliceSize } - silceMd5.Reset() - if _, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF { + n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), file, byteSize) + written += n + if err != nil && err != io.EOF { return nil, err } - md5Byte := 
silceMd5.Sum(nil) - silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte))) + md5Byte := sliceMd5.Sum(nil) + sliceMd5Hexs = append(sliceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte))) partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte))) + sliceMd5.Reset() + } + + if tmpF != nil { + if size > 0 && written != size { + return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, size) + } + _, err = tmpF.Seek(0, io.SeekStart) + if err != nil { + return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ") + } } fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil))) sliceMd5Hex := fileMd5Hex - if file.GetSize() > sliceSize { - sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n"))) + if size > sliceSize { + sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(sliceMd5Hexs, "\n"))) } fullUrl := UPLOAD_URL @@ -712,7 +746,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode } // step.4 上传切片 - _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize), isFamily) + _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily) if err != nil { return err } @@ -794,11 +828,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo // 旧版本上传,家庭云不支持覆盖 func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) { - tempFile, err := file.CacheFullInTempFile() - if err != nil { - return nil, err - } - fileMd5, err := utils.HashFile(utils.MD5, tempFile) + tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, utils.MD5) if err != nil { return nil, err } diff --git a/drivers/aliyundrive_open/upload.go 
b/drivers/aliyundrive_open/upload.go index fb730de6..4114c195 100644 --- a/drivers/aliyundrive_open/upload.go +++ b/drivers/aliyundrive_open/upload.go @@ -1,7 +1,6 @@ package aliyundrive_open import ( - "bytes" "context" "encoding/base64" "fmt" @@ -15,6 +14,7 @@ import ( "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" + streamPkg "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" "github.com/avast/retry-go" @@ -131,16 +131,19 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error return "", err } length := proofRange.End - proofRange.Start - buf := bytes.NewBuffer(make([]byte, 0, length)) reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length}) if err != nil { return "", err } - _, err = utils.CopyWithBufferN(buf, reader, length) + buf := make([]byte, length) + n, err := io.ReadFull(reader, buf) + if err == io.ErrUnexpectedEOF { + return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n) + } if err != nil { return "", err } - return base64.StdEncoding.EncodeToString(buf.Bytes()), nil + return base64.StdEncoding.EncodeToString(buf), nil } func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { @@ -183,25 +186,18 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m _, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) { req.SetBody(createData).SetResult(&createResp) }) - var tmpF model.File if err != nil { if e.Code != "PreHashMatched" || !rapidUpload { return nil, err } log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload") - hi := stream.GetHash() - hash := hi.GetHash(utils.SHA1) - if len(hash) <= 0 { - tmpF, err = 
stream.CacheFullInTempFile() + hash := stream.GetHash().GetHash(utils.SHA1) + if len(hash) != utils.SHA1.Width { + _, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1) if err != nil { return nil, err } - hash, err = utils.HashFile(utils.SHA1, tmpF) - if err != nil { - return nil, err - } - } delete(createData, "pre_hash") diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 3cc1ae9e..c33e0b32 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -6,8 +6,8 @@ import ( "encoding/hex" "errors" "io" - "math" "net/url" + "os" stdpath "path" "strconv" "time" @@ -15,6 +15,7 @@ import ( "golang.org/x/sync/semaphore" "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" @@ -185,16 +186,30 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F return newObj, nil } - tempFile, err := stream.CacheFullInTempFile() - if err != nil { - return nil, err + var ( + cache = stream.GetFile() + tmpF *os.File + err error + ) + if _, ok := cache.(io.ReaderAt); !ok { + tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*") + if err != nil { + return nil, err + } + defer func() { + _ = tmpF.Close() + _ = os.Remove(tmpF.Name()) + }() + cache = tmpF } streamSize := stream.GetSize() sliceSize := d.getSliceSize(streamSize) - count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1)) + count := int(streamSize / sliceSize) lastBlockSize := streamSize % sliceSize - if streamSize > 0 && lastBlockSize == 0 { + if lastBlockSize > 0 { + count++ + } else { lastBlockSize = sliceSize } @@ -207,6 +222,11 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F sliceMd5H := md5.New() sliceMd5H2 := md5.New() slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize) + writers 
:= []io.Writer{fileMd5H, sliceMd5H, slicemd5H2Write} + if tmpF != nil { + writers = append(writers, tmpF) + } + written := int64(0) for i := 1; i <= count; i++ { if utils.IsCanceled(ctx) { @@ -215,13 +235,23 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F if i == count { byteSize = lastBlockSize } - _, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize) + n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), stream, byteSize) + written += n if err != nil && err != io.EOF { return nil, err } blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil))) sliceMd5H.Reset() } + if tmpF != nil { + if written != streamSize { + return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize) + } + _, err = tmpF.Seek(0, io.SeekStart) + if err != nil { + return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ") + } + } contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil)) sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil)) blockListStr, _ := utils.Json.MarshalToString(blockList) @@ -291,7 +321,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F "partseq": strconv.Itoa(partseq), } err := d.uploadSlice(ctx, params, stream.GetName(), - driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize))) + driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))) if err != nil { return err } diff --git a/drivers/baidu_photo/driver.go b/drivers/baidu_photo/driver.go index eeee746f..5a34fcb4 100644 --- a/drivers/baidu_photo/driver.go +++ b/drivers/baidu_photo/driver.go @@ -7,7 +7,7 @@ import ( "errors" "fmt" "io" - "math" + "os" "regexp" "strconv" "strings" @@ -16,6 +16,7 @@ import ( "golang.org/x/sync/semaphore" "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/conf" 
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" @@ -241,11 +242,21 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil // TODO: // 暂时没有找到妙传方式 - - // 需要获取完整文件md5,必须支持 io.Seek - tempFile, err := stream.CacheFullInTempFile() - if err != nil { - return nil, err + var ( + cache = stream.GetFile() + tmpF *os.File + err error + ) + if _, ok := cache.(io.ReaderAt); !ok { + tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*") + if err != nil { + return nil, err + } + defer func() { + _ = tmpF.Close() + _ = os.Remove(tmpF.Name()) + }() + cache = tmpF } const DEFAULT int64 = 1 << 22 @@ -253,9 +264,11 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil // 计算需要的数据 streamSize := stream.GetSize() - count := int(math.Ceil(float64(streamSize) / float64(DEFAULT))) + count := int(streamSize / DEFAULT) lastBlockSize := streamSize % DEFAULT - if lastBlockSize == 0 { + if lastBlockSize > 0 { + count++ + } else { lastBlockSize = DEFAULT } @@ -266,6 +279,11 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil sliceMd5H := md5.New() sliceMd5H2 := md5.New() slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize) + writers := []io.Writer{fileMd5H, sliceMd5H, slicemd5H2Write} + if tmpF != nil { + writers = append(writers, tmpF) + } + written := int64(0) for i := 1; i <= count; i++ { if utils.IsCanceled(ctx) { return nil, ctx.Err() @@ -273,13 +291,23 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil if i == count { byteSize = lastBlockSize } - _, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize) + n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), stream, byteSize) + written += n if err != nil && err != io.EOF { return nil, err } sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil))) 
sliceMd5H.Reset() } + if tmpF != nil { + if written != streamSize { + return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize) + } + _, err = tmpF.Seek(0, io.SeekStart) + if err != nil { + return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ") + } + } contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil)) sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil)) blockListStr, _ := utils.Json.MarshalToString(sliceMD5List) @@ -291,7 +319,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil "rtype": "1", "ctype": "11", "path": fmt.Sprintf("/%s", stream.GetName()), - "size": fmt.Sprint(stream.GetSize()), + "size": fmt.Sprint(streamSize), "slice-md5": sliceMd5, "content-md5": contentMd5, "block_list": blockListStr, @@ -343,7 +371,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil r.SetContext(ctx) r.SetQueryParams(uploadParams) r.SetFileReader("file", stream.GetName(), - driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize))) + driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))) }, nil) if err != nil { return err diff --git a/drivers/cloudreve/util.go b/drivers/cloudreve/util.go index 1fd5ed8a..196d7303 100644 --- a/drivers/cloudreve/util.go +++ b/drivers/cloudreve/util.go @@ -204,7 +204,7 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up req.SetContentLength(true) req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10)) req.SetHeader("User-Agent", d.getUA()) - req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) + req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) }, nil) if err != nil { break @@ -239,7 +239,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U return err } req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), - 
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) + driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) if err != nil { return err } @@ -280,7 +280,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u if err != nil { return err } - req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) + req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) if err != nil { return err } diff --git a/drivers/github/util.go b/drivers/github/util.go index 03318784..7ddf8746 100644 --- a/drivers/github/util.go +++ b/drivers/github/util.go @@ -5,7 +5,6 @@ import ( "context" "errors" "fmt" - "io" "strings" "text/template" "time" @@ -159,7 +158,7 @@ func signCommit(m *map[string]interface{}, entity *openpgp.Entity) (string, erro if err != nil { return "", err } - if _, err = io.Copy(armorWriter, &sigBuffer); err != nil { + if _, err = utils.CopyWithBuffer(armorWriter, &sigBuffer); err != nil { return "", err } _ = armorWriter.Close() diff --git a/drivers/ilanzou/driver.go b/drivers/ilanzou/driver.go index 39a311dd..044193d3 100644 --- a/drivers/ilanzou/driver.go +++ b/drivers/ilanzou/driver.go @@ -2,7 +2,6 @@ package template import ( "context" - "crypto/md5" "encoding/base64" "encoding/hex" "fmt" @@ -17,6 +16,7 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/foxxorcat/mopan-sdk-go" "github.com/go-resty/resty/v2" @@ -273,23 +273,14 @@ func (d *ILanZou) Remove(ctx context.Context, obj model.Obj) error { const DefaultPartSize = 1024 * 1024 * 8 func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { - h := md5.New() - // need to calculate md5 of the 
full content - tempFile, err := s.CacheFullInTempFile() - if err != nil { - return nil, err + etag := s.GetHash().GetHash(utils.MD5) + var err error + if len(etag) != utils.MD5.Width { + _, etag, err = stream.CacheFullInTempFileAndHash(s, utils.MD5) + if err != nil { + return nil, err + } } - defer func() { - _ = tempFile.Close() - }() - if _, err = utils.CopyWithBuffer(h, tempFile); err != nil { - return nil, err - } - _, err = tempFile.Seek(0, io.SeekStart) - if err != nil { - return nil, err - } - etag := hex.EncodeToString(h.Sum(nil)) // get upToken res, err := d.proved("/7n/getUpToken", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ @@ -309,7 +300,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli()) reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: &driver.SimpleReaderWithSize{ - Reader: tempFile, + Reader: s, Size: s.GetSize(), }, UpdateProgress: up, diff --git a/drivers/mopan/driver.go b/drivers/mopan/driver.go index 736d612a..f8f14300 100644 --- a/drivers/mopan/driver.go +++ b/drivers/mopan/driver.go @@ -269,9 +269,6 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre if err != nil { return nil, err } - defer func() { - _ = file.Close() - }() // step.1 uploadPartData, err := mopan.InitUploadPartData(ctx, mopan.UpdloadFileParam{ diff --git a/drivers/netease_music/util.go b/drivers/netease_music/util.go index 2e78be14..21718106 100644 --- a/drivers/netease_music/util.go +++ b/drivers/netease_music/util.go @@ -227,7 +227,6 @@ func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStrea if err != nil { return err } - defer tmp.Close() u := uploader{driver: d, file: tmp} diff --git a/drivers/onedrive/util.go b/drivers/onedrive/util.go index 55434967..e256b7ae 100644 --- a/drivers/onedrive/util.go +++ 
b/drivers/onedrive/util.go @@ -220,7 +220,7 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil if err != nil { return err } - req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) + req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) if err != nil { return err } diff --git a/drivers/onedrive_app/util.go b/drivers/onedrive_app/util.go index 1b01324e..5c3b6c92 100644 --- a/drivers/onedrive_app/util.go +++ b/drivers/onedrive_app/util.go @@ -170,7 +170,7 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model. if err != nil { return err } - req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData))) + req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) if err != nil { return err } diff --git a/drivers/pikpak/util.go b/drivers/pikpak/util.go index 61396aa4..f88f085c 100644 --- a/drivers/pikpak/util.go +++ b/drivers/pikpak/util.go @@ -7,13 +7,6 @@ import ( "crypto/sha1" "encoding/hex" "fmt" - "github.com/alist-org/alist/v3/internal/driver" - "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/op" - "github.com/alist-org/alist/v3/pkg/utils" - "github.com/aliyun/aliyun-oss-go-sdk/oss" - jsoniter "github.com/json-iterator/go" - "github.com/pkg/errors" "io" "net/http" "path/filepath" @@ -24,7 +17,14 @@ import ( "time" "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/aliyun/aliyun-oss-go-sdk/oss" "github.com/go-resty/resty/v2" + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" ) var AndroidAlgorithms = []string{ @@ -516,7 +516,7 @@ func (d *PikPak) 
UploadByMultipart(ctx context.Context, params *S3Params, fileSi continue } - b := driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf)) + b := driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)) if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, OssOption(params)...); err == nil { break } diff --git a/drivers/quark_uc/driver.go b/drivers/quark_uc/driver.go index 0f8884fa..7f497494 100644 --- a/drivers/quark_uc/driver.go +++ b/drivers/quark_uc/driver.go @@ -3,9 +3,8 @@ package quark import ( "bytes" "context" - "crypto/md5" - "crypto/sha1" "encoding/hex" + "hash" "io" "net/http" "time" @@ -14,6 +13,7 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + streamPkg "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" log "github.com/sirupsen/logrus" @@ -136,33 +136,33 @@ func (d *QuarkOrUC) Remove(ctx context.Context, obj model.Obj) error { } func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - tempFile, err := stream.CacheFullInTempFile() - if err != nil { - return err + md5Str, sha1Str := stream.GetHash().GetHash(utils.MD5), stream.GetHash().GetHash(utils.SHA1) + var ( + md5 hash.Hash + sha1 hash.Hash + ) + writers := []io.Writer{} + if len(md5Str) != utils.MD5.Width { + md5 = utils.MD5.NewFunc() + writers = append(writers, md5) } - defer func() { - _ = tempFile.Close() - }() - m := md5.New() - _, err = utils.CopyWithBuffer(m, tempFile) - if err != nil { - return err + if len(sha1Str) != utils.SHA1.Width { + sha1 = utils.SHA1.NewFunc() + writers = append(writers, sha1) } - _, err = tempFile.Seek(0, io.SeekStart) - if err != nil { - return err + + if len(writers) > 0 { + _, err := streamPkg.CacheFullInTempFileAndWriter(stream, io.MultiWriter(writers...)) + if err != nil { + return err + } + if md5 != nil 
{ + md5Str = hex.EncodeToString(md5.Sum(nil)) + } + if sha1 != nil { + sha1Str = hex.EncodeToString(sha1.Sum(nil)) + } } - md5Str := hex.EncodeToString(m.Sum(nil)) - s := sha1.New() - _, err = utils.CopyWithBuffer(s, tempFile) - if err != nil { - return err - } - _, err = tempFile.Seek(0, io.SeekStart) - if err != nil { - return err - } - sha1Str := hex.EncodeToString(s.Sum(nil)) // pre pre, err := d.upPre(stream, dstDir.GetID()) if err != nil { @@ -178,27 +178,28 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File return nil } // part up - partSize := pre.Metadata.PartSize - var part []byte - md5s := make([]string, 0) - defaultBytes := make([]byte, partSize) total := stream.GetSize() left := total + partSize := int64(pre.Metadata.PartSize) + part := make([]byte, partSize) + count := int(total / partSize) + if total%partSize > 0 { + count++ + } + md5s := make([]string, 0, count) partNumber := 1 for left > 0 { if utils.IsCanceled(ctx) { return ctx.Err() } - if left > int64(partSize) { - part = defaultBytes - } else { - part = make([]byte, left) + if left < partSize { + part = part[:left] } - _, err := io.ReadFull(tempFile, part) + n, err := io.ReadFull(stream, part) if err != nil { return err } - left -= int64(len(part)) + left -= int64(n) log.Debugf("left: %d", left) reader := driver.NewLimitedUploadStream(ctx, bytes.NewReader(part)) m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, reader) diff --git a/drivers/thunder/driver.go b/drivers/thunder/driver.go index 7f41d003..51396ee8 100644 --- a/drivers/thunder/driver.go +++ b/drivers/thunder/driver.go @@ -12,6 +12,7 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash" "github.com/aws/aws-sdk-go/aws" @@ -333,22 +334,17 @@ func (xc 
*XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error { } func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { - hi := file.GetHash() - gcid := hi.GetHash(hash_extend.GCID) + gcid := file.GetHash().GetHash(hash_extend.GCID) + var err error if len(gcid) < hash_extend.GCID.Width { - tFile, err := file.CacheFullInTempFile() - if err != nil { - return err - } - - gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize()) + _, gcid, err = stream.CacheFullInTempFileAndHash(file, hash_extend.GCID, file.GetSize()) if err != nil { return err } } var resp UploadTaskResponse - _, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { + _, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { r.SetContext(ctx) r.SetBody(&base.Json{ "kind": FILE, diff --git a/drivers/thunder_browser/driver.go b/drivers/thunder_browser/driver.go index 7ce71f7d..0b38d077 100644 --- a/drivers/thunder_browser/driver.go +++ b/drivers/thunder_browser/driver.go @@ -4,10 +4,15 @@ import ( "context" "errors" "fmt" + "io" + "net/http" + "strings" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + streamPkg "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash" "github.com/aws/aws-sdk-go/aws" @@ -15,9 +20,6 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/go-resty/resty/v2" - "io" - "net/http" - "strings" ) type ThunderBrowser struct { @@ -456,15 +458,10 @@ func (xc *XunLeiBrowserCommon) Remove(ctx context.Context, obj model.Obj) error } func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - hi := 
stream.GetHash() - gcid := hi.GetHash(hash_extend.GCID) + gcid := stream.GetHash().GetHash(hash_extend.GCID) + var err error if len(gcid) < hash_extend.GCID.Width { - tFile, err := stream.CacheFullInTempFile() - if err != nil { - return err - } - - gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize()) + _, gcid, err = streamPkg.CacheFullInTempFileAndHash(stream, hash_extend.GCID, stream.GetSize()) if err != nil { return err } @@ -481,7 +478,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream } var resp UploadTaskResponse - _, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { + _, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { r.SetContext(ctx) r.SetBody(&js) }, &resp) diff --git a/drivers/thunderx/driver.go b/drivers/thunderx/driver.go index 2194bdc6..6ee8901a 100644 --- a/drivers/thunderx/driver.go +++ b/drivers/thunderx/driver.go @@ -3,11 +3,15 @@ package thunderx import ( "context" "fmt" + "net/http" + "strings" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash" "github.com/aws/aws-sdk-go/aws" @@ -15,8 +19,6 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/go-resty/resty/v2" - "net/http" - "strings" ) type ThunderX struct { @@ -364,22 +366,17 @@ func (xc *XunLeiXCommon) Remove(ctx context.Context, obj model.Obj) error { } func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { - hi := file.GetHash() - gcid := hi.GetHash(hash_extend.GCID) + gcid := file.GetHash().GetHash(hash_extend.GCID) + var err error if 
len(gcid) < hash_extend.GCID.Width { - tFile, err := file.CacheFullInTempFile() - if err != nil { - return err - } - - gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize()) + _, gcid, err = stream.CacheFullInTempFileAndHash(file, hash_extend.GCID, file.GetSize()) if err != nil { return err } } var resp UploadTaskResponse - _, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { + _, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { r.SetContext(ctx) r.SetBody(&base.Json{ "kind": FILE, diff --git a/internal/archive/archives/utils.go b/internal/archive/archives/utils.go index fdae1009..2f499a10 100644 --- a/internal/archive/archives/utils.go +++ b/internal/archive/archives/utils.go @@ -10,6 +10,7 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/pkg/utils" "github.com/mholt/archives" ) @@ -73,7 +74,7 @@ func decompress(fsys fs2.FS, filePath, targetPath string, up model.UpdateProgres return err } defer f.Close() - _, err = io.Copy(f, &stream.ReaderUpdatingProgress{ + _, err = utils.CopyWithBuffer(f, &stream.ReaderUpdatingProgress{ Reader: &stream.SimpleReaderWithSize{ Reader: rc, Size: stat.Size(), diff --git a/internal/archive/iso9660/utils.go b/internal/archive/iso9660/utils.go index 12de8e6e..0e4cfb1c 100644 --- a/internal/archive/iso9660/utils.go +++ b/internal/archive/iso9660/utils.go @@ -1,14 +1,15 @@ package iso9660 import ( - "github.com/alist-org/alist/v3/internal/errs" - "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/stream" - "github.com/kdomanski/iso9660" - "io" "os" stdpath "path" "strings" + + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/kdomanski/iso9660" ) func 
getImage(ss *stream.SeekableStream) (*iso9660.Image, error) { @@ -66,7 +67,7 @@ func decompress(f *iso9660.File, path string, up model.UpdateProgress) error { return err } defer file.Close() - _, err = io.Copy(file, &stream.ReaderUpdatingProgress{ + _, err = utils.CopyWithBuffer(file, &stream.ReaderUpdatingProgress{ Reader: &stream.SimpleReaderWithSize{ Reader: f.Reader(), Size: f.Size(), diff --git a/internal/fs/archive.go b/internal/fs/archive.go index b056decf..dbae9b33 100644 --- a/internal/fs/archive.go +++ b/internal/fs/archive.go @@ -90,9 +90,11 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT t.SetTotalBytes(total) t.status = "getting src object" for _, s := range ss { - _, err = s.CacheFullInTempFileAndUpdateProgress(func(p float64) { - t.SetProgress((float64(cur) + float64(s.GetSize())*p/100.0) / float64(total)) - }) + if s.GetFile() == nil { + _, err = stream.CacheFullInTempFileAndUpdateProgress(s, func(p float64) { + t.SetProgress((float64(cur) + float64(s.GetSize())*p/100.0) / float64(total)) + }) + } cur += s.GetSize() if err != nil { return nil, err diff --git a/internal/model/obj.go b/internal/model/obj.go index 552b1241..f0fce7a1 100644 --- a/internal/model/obj.go +++ b/internal/model/obj.go @@ -2,6 +2,7 @@ package model import ( "io" + "os" "sort" "strings" "time" @@ -48,7 +49,8 @@ type FileStreamer interface { RangeRead(http_range.Range) (io.Reader, error) //for a non-seekable Stream, if Read is called, this function won't work CacheFullInTempFile() (File, error) - CacheFullInTempFileAndUpdateProgress(up UpdateProgress) (File, error) + SetTmpFile(r *os.File) + GetFile() File } type UpdateProgress func(percentage float64) diff --git a/internal/net/request.go b/internal/net/request.go index d4f9321c..a1ff6d20 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -248,8 +248,9 @@ func (d *downloader) sendChunkTask(newConcurrency bool) error { size: finalSize, id: d.nextChunk, buf: buf, + + 
newConcurrency: newConcurrency, } - ch.newConcurrency = newConcurrency d.pos += finalSize d.nextChunk++ d.chunkChannel <- ch diff --git a/internal/stream/stream.go b/internal/stream/stream.go index f6b045a0..64160915 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -94,27 +94,17 @@ func (f *FileStream) CacheFullInTempFile() (model.File, error) { f.Add(tmpF) f.tmpFile = tmpF f.Reader = tmpF - return f.tmpFile, nil + return tmpF, nil } -func (f *FileStream) CacheFullInTempFileAndUpdateProgress(up model.UpdateProgress) (model.File, error) { +func (f *FileStream) GetFile() model.File { if f.tmpFile != nil { - return f.tmpFile, nil + return f.tmpFile } if file, ok := f.Reader.(model.File); ok { - return file, nil + return file } - tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{ - Reader: f, - UpdateProgress: up, - }, f.GetSize()) - if err != nil { - return nil, err - } - f.Add(tmpF) - f.tmpFile = tmpF - f.Reader = tmpF - return f.tmpFile, nil + return nil } const InMemoryBufMaxSize = 10 // Megabytes @@ -127,31 +117,36 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { // 参考 internal/net/request.go httpRange.Length = f.GetSize() - httpRange.Start } - if f.peekBuff != nil && httpRange.Start < int64(f.peekBuff.Len()) && httpRange.Start+httpRange.Length-1 < int64(f.peekBuff.Len()) { + size := httpRange.Start + httpRange.Length + if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) { return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil } - if f.tmpFile == nil { - if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil { - bufSize := utils.Min(httpRange.Length, f.GetSize()) - newBuf := bytes.NewBuffer(make([]byte, 0, bufSize)) - n, err := utils.CopyWithBufferN(newBuf, f.Reader, bufSize) + var cache io.ReaderAt = f.GetFile() + if cache == nil { + if size <= InMemoryBufMaxSizeBytes { + bufSize := min(size, f.GetSize()) + // 
使用bytes.Buffer作为io.CopyBuffer的写入对象,CopyBuffer会调用Buffer.ReadFrom + // 即使被写入的数据量与Buffer.Cap一致,Buffer也会扩大 + buf := make([]byte, bufSize) + n, err := io.ReadFull(f.Reader, buf) if err != nil { return nil, err } - if n != bufSize { + if n != int(bufSize) { return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect =%d ,actual =%d", bufSize, n) } - f.peekBuff = bytes.NewReader(newBuf.Bytes()) + f.peekBuff = bytes.NewReader(buf) f.Reader = io.MultiReader(f.peekBuff, f.Reader) - return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil + cache = f.peekBuff } else { - _, err := f.CacheFullInTempFile() + var err error + cache, err = f.CacheFullInTempFile() if err != nil { return nil, err } } } - return io.NewSectionReader(f.tmpFile, httpRange.Start, httpRange.Length), nil + return io.NewSectionReader(cache, httpRange.Start, httpRange.Length), nil } var _ model.FileStreamer = (*SeekableStream)(nil) @@ -176,13 +171,13 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) if len(fs.Mimetype) == 0 { fs.Mimetype = utils.GetMimeType(fs.Obj.GetName()) } - ss := SeekableStream{FileStream: fs, Link: link} + ss := &SeekableStream{FileStream: fs, Link: link} if ss.Reader != nil { result, ok := ss.Reader.(model.File) if ok { ss.mFile = result ss.Closers.Add(result) - return &ss, nil + return ss, nil } } if ss.Link != nil { @@ -198,7 +193,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) ss.mFile = mFile ss.Reader = mFile ss.Closers.Add(mFile) - return &ss, nil + return ss, nil } if ss.Link.RangeReadCloser != nil { ss.rangeReadCloser = &RateLimitRangeReadCloser{ @@ -206,7 +201,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) Limiter: ServerDownloadLimit, } ss.Add(ss.rangeReadCloser) - return &ss, nil + return ss, nil } if len(ss.Link.URL) > 0 { rrc, err := GetRangeReadCloserFromLink(ss.GetSize(), link) @@ -219,10 +214,12 @@ func 
NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) } ss.rangeReadCloser = rrc ss.Add(rrc) - return &ss, nil + return ss, nil } } - + if fs.Reader != nil { + return ss, nil + } return nil, fmt.Errorf("illegal seekableStream") } @@ -248,7 +245,7 @@ func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, erro } return rc, nil } - return nil, fmt.Errorf("can't find mFile or rangeReadCloser") + return ss.FileStream.RangeRead(httpRange) } //func (f *FileStream) GetReader() io.Reader { @@ -278,7 +275,7 @@ func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) { if ss.tmpFile != nil { return ss.tmpFile, nil } - if _, ok := ss.mFile.(*os.File); ok { + if ss.mFile != nil { return ss.mFile, nil } tmpF, err := utils.CreateTempFile(ss, ss.GetSize()) @@ -288,27 +285,17 @@ func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) { ss.Add(tmpF) ss.tmpFile = tmpF ss.Reader = tmpF - return ss.tmpFile, nil + return tmpF, nil } -func (ss *SeekableStream) CacheFullInTempFileAndUpdateProgress(up model.UpdateProgress) (model.File, error) { +func (ss *SeekableStream) GetFile() model.File { if ss.tmpFile != nil { - return ss.tmpFile, nil + return ss.tmpFile } - if _, ok := ss.mFile.(*os.File); ok { - return ss.mFile, nil + if ss.mFile != nil { + return ss.mFile } - tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{ - Reader: ss, - UpdateProgress: up, - }, ss.GetSize()) - if err != nil { - return nil, err - } - ss.Add(tmpF) - ss.tmpFile = tmpF - ss.Reader = tmpF - return ss.tmpFile, nil + return nil } func (f *FileStream) SetTmpFile(r *os.File) { diff --git a/internal/stream/util.go b/internal/stream/util.go index 01019482..5b935a90 100644 --- a/internal/stream/util.go +++ b/internal/stream/util.go @@ -2,6 +2,7 @@ package stream import ( "context" + "encoding/hex" "fmt" "io" "net/http" @@ -96,3 +97,45 @@ func (r *ReaderWithCtx) Close() error { } return nil } + +func CacheFullInTempFileAndUpdateProgress(stream 
model.FileStreamer, up model.UpdateProgress) (model.File, error) { + if cache := stream.GetFile(); cache != nil { + up(100) + return cache, nil + } + tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{ + Reader: stream, + UpdateProgress: up, + }, stream.GetSize()) + if err == nil { + stream.SetTmpFile(tmpF) + } + return tmpF, err +} + +func CacheFullInTempFileAndWriter(stream model.FileStreamer, w io.Writer) (model.File, error) { + if cache := stream.GetFile(); cache != nil { + _, err := cache.Seek(0, io.SeekStart) + if err == nil { + _, err = utils.CopyWithBuffer(w, cache) + if err == nil { + _, err = cache.Seek(0, io.SeekStart) + } + } + return cache, err + } + tmpF, err := utils.CreateTempFile(io.TeeReader(stream, w), stream.GetSize()) + if err == nil { + stream.SetTmpFile(tmpF) + } + return tmpF, err +} + +func CacheFullInTempFileAndHash(stream model.FileStreamer, hashType *utils.HashType, params ...any) (model.File, string, error) { + h := hashType.NewFunc(params...) + tmpF, err := CacheFullInTempFileAndWriter(stream, h) + if err != nil { + return nil, "", err + } + return tmpF, hex.EncodeToString(h.Sum(nil)), err +} diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 15a6328b..41344fb8 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -1,8 +1,6 @@ package handles import ( - "github.com/alist-org/alist/v3/internal/task" - "github.com/alist-org/alist/v3/pkg/utils" "io" "net/url" stdpath "path" @@ -12,6 +10,8 @@ import ( "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/internal/task" + "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" "github.com/gin-gonic/gin" ) @@ -44,7 +44,7 @@ func FsStream(c *gin.Context) { } if !overwrite { if res, _ := fs.Get(c, path, &fs.GetArgs{NoLog: true}); res != nil { - _, _ = io.Copy(io.Discard, c.Request.Body) + _, _ = 
utils.CopyWithBuffer(io.Discard, c.Request.Body) common.ErrorStrResp(c, "file exists", 403) return } @@ -66,6 +66,10 @@ func FsStream(c *gin.Context) { if sha256 := c.GetHeader("X-File-Sha256"); sha256 != "" { h[utils.SHA256] = sha256 } + mimetype := c.GetHeader("Content-Type") + if len(mimetype) == 0 { + mimetype = utils.GetMimeType(name) + } s := &stream.FileStream{ Obj: &model.Object{ Name: name, @@ -74,7 +78,7 @@ func FsStream(c *gin.Context) { HashInfo: utils.NewHashInfoByMap(h), }, Reader: c.Request.Body, - Mimetype: c.GetHeader("Content-Type"), + Mimetype: mimetype, WebPutAsTask: asTask, } var t task.TaskExtensionInfo @@ -89,6 +93,9 @@ func FsStream(c *gin.Context) { return } if t == nil { + if n, _ := io.ReadFull(c.Request.Body, []byte{0}); n == 1 { + _, _ = utils.CopyWithBuffer(io.Discard, c.Request.Body) + } common.SuccessResp(c) return } @@ -114,7 +121,7 @@ func FsForm(c *gin.Context) { } if !overwrite { if res, _ := fs.Get(c, path, &fs.GetArgs{NoLog: true}); res != nil { - _, _ = io.Copy(io.Discard, c.Request.Body) + _, _ = utils.CopyWithBuffer(io.Discard, c.Request.Body) common.ErrorStrResp(c, "file exists", 403) return } @@ -150,6 +157,10 @@ func FsForm(c *gin.Context) { if sha256 := c.GetHeader("X-File-Sha256"); sha256 != "" { h[utils.SHA256] = sha256 } + mimetype := file.Header.Get("Content-Type") + if len(mimetype) == 0 { + mimetype = utils.GetMimeType(name) + } s := stream.FileStream{ Obj: &model.Object{ Name: name, @@ -158,7 +169,7 @@ func FsForm(c *gin.Context) { HashInfo: utils.NewHashInfoByMap(h), }, Reader: f, - Mimetype: file.Header.Get("Content-Type"), + Mimetype: mimetype, WebPutAsTask: asTask, } var t task.TaskExtensionInfo @@ -168,12 +179,7 @@ func FsForm(c *gin.Context) { }{f} t, err = fs.PutAsTask(c, dir, &s) } else { - ss, err := stream.NewSeekableStream(s, nil) - if err != nil { - common.ErrorResp(c, err, 500) - return - } - err = fs.PutDirectly(c, dir, ss, true) + err = fs.PutDirectly(c, dir, &s, true) } if err != nil { 
common.ErrorResp(c, err, 500) From a4bfbf8a83f39708b1890d700608dc3b3737501a Mon Sep 17 00:00:00 2001 From: jerry <109275116+jerry-harm@users.noreply.github.com> Date: Sat, 12 Apr 2025 17:01:30 +0800 Subject: [PATCH 163/187] fix(ipfs): fix problems (#8252) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: :bug: (ipfs): fix the list error caused by not proper join path function 使用更加规范的路径拼接,修复了有中文或符号的路径无法正常访问的问题 * refactor: 命名规范 * 删除多余的条件判断 * fix: 使用withresult方法重构代码,添加get方法,提高性能 * fix: 允许get方法获取目录 去除多余的判断 * fix: 允许copy,rename,move进行覆写 * fix: 修复move方法导致的目录被删除 * refactor: 整理关于返回Path的代码 * fix: 修复由于get方法导致的ipfs路径无法访问 * fix: 修复path处理错误的get方法 修复get方法,删除意外加入的目录 * fix: fix path join use path join instead of filepath join to avoid os problem * fix: rm filepath ref --------- Co-authored-by: Andy Hsu --- drivers/ipfs_api/driver.go | 138 ++++++++++++++++++++++++------------- drivers/ipfs_api/meta.go | 4 +- 2 files changed, 92 insertions(+), 50 deletions(-) diff --git a/drivers/ipfs_api/driver.go b/drivers/ipfs_api/driver.go index e59da7ca..264cef28 100644 --- a/drivers/ipfs_api/driver.go +++ b/drivers/ipfs_api/driver.go @@ -4,8 +4,7 @@ import ( "context" "fmt" "net/url" - "path/filepath" - "strings" + "path" shell "github.com/ipfs/go-ipfs-api" @@ -43,78 +42,115 @@ func (d *IPFS) Drop(ctx context.Context) error { } func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { - path := dir.GetPath() - switch d.Mode { - case "ipfs": - path, _ = url.JoinPath("/ipfs", path) - case "ipns": - path, _ = url.JoinPath("/ipns", path) - case "mfs": - fileStat, err := d.sh.FilesStat(ctx, path) - if err != nil { - return nil, err + var ipfsPath string + cid := dir.GetID() + if cid != "" { + ipfsPath = path.Join("/ipfs", cid) + } else { + // 可能出现ipns dns解析失败的情况,需要重复获取cid,其他情况应该不会出错 + ipfsPath = dir.GetPath() + switch d.Mode { + case "ipfs": + ipfsPath = path.Join("/ipfs", ipfsPath) + case "ipns": + ipfsPath = 
path.Join("/ipns", ipfsPath) + case "mfs": + fileStat, err := d.sh.FilesStat(ctx, ipfsPath) + if err != nil { + return nil, err + } + ipfsPath = path.Join("/ipfs", fileStat.Hash) + default: + return nil, fmt.Errorf("mode error") } - path, _ = url.JoinPath("/ipfs", fileStat.Hash) - default: - return nil, fmt.Errorf("mode error") } - - dirs, err := d.sh.List(path) + dirs, err := d.sh.List(ipfsPath) if err != nil { return nil, err } objlist := []model.Obj{} for _, file := range dirs { - gateurl := *d.gateURL.JoinPath("/ipfs/" + file.Hash) - gateurl.RawQuery = "filename=" + url.PathEscape(file.Name) - objlist = append(objlist, &model.ObjectURL{ - Object: model.Object{ID: "/ipfs/" + file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1}, - Url: model.Url{Url: gateurl.String()}, - }) + objlist = append(objlist, &model.Object{ID: file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1}) } return objlist, nil } func (d *IPFS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { - gateurl := d.gateURL.JoinPath(file.GetID()) - gateurl.RawQuery = "filename=" + url.PathEscape(file.GetName()) + gateurl := d.gateURL.JoinPath("/ipfs/", file.GetID()) + gateurl.RawQuery = "filename=" + url.QueryEscape(file.GetName()) return &model.Link{URL: gateurl.String()}, nil } -func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { - if d.Mode != "mfs" { - return fmt.Errorf("only write in mfs mode") +func (d *IPFS) Get(ctx context.Context, rawPath string) (model.Obj, error) { + rawPath = path.Join(d.GetRootPath(), rawPath) + var ipfsPath string + switch d.Mode { + case "ipfs": + ipfsPath = path.Join("/ipfs", rawPath) + case "ipns": + ipfsPath = path.Join("/ipns", rawPath) + case "mfs": + fileStat, err := d.sh.FilesStat(ctx, rawPath) + if err != nil { + return nil, err + } + ipfsPath = path.Join("/ipfs", fileStat.Hash) + default: + return nil, fmt.Errorf("mode error") } - path := 
parentDir.GetPath() - if path[len(path):] != "/" { - path += "/" + file, err := d.sh.FilesStat(ctx, ipfsPath) + if err != nil { + return nil, err } - return d.sh.FilesMkdir(ctx, path+dirName) + return &model.Object{ID: file.Hash, Name: path.Base(rawPath), Path: rawPath, Size: int64(file.Size), IsFolder: file.Type == "directory"}, nil } -func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) error { +func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { if d.Mode != "mfs" { - return fmt.Errorf("only write in mfs mode") + return nil, fmt.Errorf("only write in mfs mode") } - return d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath()) + dirPath := parentDir.GetPath() + err := d.sh.FilesMkdir(ctx, path.Join(dirPath, dirName), shell.FilesMkdir.Parents(true)) + if err != nil { + return nil, err + } + file, err := d.sh.FilesStat(ctx, path.Join(dirPath, dirName)) + if err != nil { + return nil, err + } + return &model.Object{ID: file.Hash, Name: dirName, Path: path.Join(dirPath, dirName), Size: int64(file.Size), IsFolder: true}, nil } -func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName string) error { +func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { if d.Mode != "mfs" { - return fmt.Errorf("only write in mfs mode") + return nil, fmt.Errorf("only write in mfs mode") } - newFileName := filepath.Dir(srcObj.GetPath()) + "/" + newName - return d.sh.FilesMv(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/")) + dstPath := path.Join(dstDir.GetPath(), path.Base(srcObj.GetPath())) + d.sh.FilesRm(ctx, dstPath, true) + return &model.Object{ID: srcObj.GetID(), Name: srcObj.GetName(), Path: dstPath, Size: int64(srcObj.GetSize()), IsFolder: srcObj.IsDir()}, + d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath()) } -func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { +func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName 
string) (model.Obj, error) { if d.Mode != "mfs" { - return fmt.Errorf("only write in mfs mode") + return nil, fmt.Errorf("only write in mfs mode") } - newFileName := dstDir.GetPath() + "/" + filepath.Base(srcObj.GetPath()) - return d.sh.FilesCp(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/")) + dstPath := path.Join(path.Dir(srcObj.GetPath()), newName) + d.sh.FilesRm(ctx, dstPath, true) + return &model.Object{ID: srcObj.GetID(), Name: newName, Path: dstPath, Size: int64(srcObj.GetSize()), + IsFolder: srcObj.IsDir()}, d.sh.FilesMv(ctx, srcObj.GetPath(), dstPath) +} + +func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + if d.Mode != "mfs" { + return nil, fmt.Errorf("only write in mfs mode") + } + dstPath := path.Join(dstDir.GetPath(), path.Base(srcObj.GetPath())) + d.sh.FilesRm(ctx, dstPath, true) + return &model.Object{ID: srcObj.GetID(), Name: srcObj.GetName(), Path: dstPath, Size: int64(srcObj.GetSize()), IsFolder: srcObj.IsDir()}, + d.sh.FilesCp(ctx, path.Join("/ipfs/", srcObj.GetID()), dstPath, shell.FilesCp.Parents(true)) } func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error { @@ -124,19 +160,25 @@ func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error { return d.sh.FilesRm(ctx, obj.GetPath(), true) } -func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { +func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { if d.Mode != "mfs" { - return fmt.Errorf("only write in mfs mode") + return nil, fmt.Errorf("only write in mfs mode") } outHash, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ Reader: s, UpdateProgress: up, })) if err != nil { - return err + return nil, err } - err = d.sh.FilesCp(ctx, "/ipfs/"+outHash, dstDir.GetPath()+"/"+strings.ReplaceAll(s.GetName(), "\\", "/")) - return err + dstPath := 
path.Join(dstDir.GetPath(), s.GetName()) + if s.GetExist() != nil { + d.sh.FilesRm(ctx, dstPath, true) + } + err = d.sh.FilesCp(ctx, path.Join("/ipfs/", outHash), dstPath, shell.FilesCp.Parents(true)) + gateurl := d.gateURL.JoinPath("/ipfs/", outHash) + gateurl.RawQuery = "filename=" + url.QueryEscape(s.GetName()) + return &model.Object{ID: outHash, Name: s.GetName(), Path: dstPath, Size: int64(s.GetSize()), IsFolder: s.IsDir()}, err } //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { diff --git a/drivers/ipfs_api/meta.go b/drivers/ipfs_api/meta.go index c145644c..3837bec2 100644 --- a/drivers/ipfs_api/meta.go +++ b/drivers/ipfs_api/meta.go @@ -9,8 +9,8 @@ type Addition struct { // Usually one of two driver.RootPath Mode string `json:"mode" options:"ipfs,ipns,mfs" type:"select" required:"true"` - Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001"` - Gateway string `json:"gateway" default:"http://127.0.0.1:8080"` + Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001" required:"true"` + Gateway string `json:"gateway" default:"http://127.0.0.1:8080" required:"true"` } var config = driver.Config{ From a2f266277c44326a073999699b9b3e6015a9659d Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Sat, 12 Apr 2025 17:01:52 +0800 Subject: [PATCH 164/187] fix(net): unexpected write (#8291 close #8281) --- internal/net/serve.go | 16 +++++++++------- server/common/proxy.go | 37 +++++++++++++++++++++++++++++-------- server/handles/down.go | 35 +++++++++++++++-------------------- server/webdav/webdav.go | 10 ++++++---- 4 files changed, 59 insertions(+), 39 deletions(-) diff --git a/internal/net/serve.go b/internal/net/serve.go index 63e1cb45..bdeac0ac 100644 --- a/internal/net/serve.go +++ b/internal/net/serve.go @@ -52,19 +52,19 @@ import ( // // If the caller has set w's ETag header formatted per RFC 7232, section 2.3, // ServeHTTP uses it to handle requests using 
If-Match, If-None-Match, or If-Range. -func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time.Time, size int64, RangeReadCloser model.RangeReadCloserIF) { +func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time.Time, size int64, RangeReadCloser model.RangeReadCloserIF) error { defer RangeReadCloser.Close() setLastModified(w, modTime) done, rangeReq := checkPreconditions(w, r, modTime) if done { - return + return nil } if size < 0 { // since too many functions need file size to work, // will not implement the support of unknown file size here http.Error(w, "negative content size not supported", http.StatusInternalServerError) - return + return nil } code := http.StatusOK @@ -103,7 +103,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time fallthrough default: http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return + return nil } if sumRangesSize(ranges) > size { @@ -124,7 +124,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time code = http.StatusTooManyRequests } http.Error(w, err.Error(), code) - return + return nil } sendContent = reader case len(ranges) == 1: @@ -147,7 +147,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time code = http.StatusTooManyRequests } http.Error(w, err.Error(), code) - return + return nil } sendSize = ra.Length code = http.StatusPartialContent @@ -205,9 +205,11 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time if err == ErrExceedMaxConcurrency { code = http.StatusTooManyRequests } - http.Error(w, err.Error(), code) + w.WriteHeader(code) + return err } } + return nil } func ProcessHeader(origin, override http.Header) http.Header { result := http.Header{} diff --git a/server/common/proxy.go b/server/common/proxy.go index f9e1e4bb..ca7f6325 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -39,11 +39,10 @@ func Proxy(w 
http.ResponseWriter, r *http.Request, link *model.Link, file model. return nil } else if link.RangeReadCloser != nil { attachHeader(w, file) - net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{ + return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{ RangeReadCloserIF: link.RangeReadCloser, Limiter: stream.ServerDownloadLimit, }) - return nil } else if link.Concurrency != 0 || link.PartSize != 0 { attachHeader(w, file) size := file.GetSize() @@ -66,11 +65,10 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. rc, err := down.Download(ctx, req) return rc, err } - net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{ + return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{ RangeReadCloserIF: &model.RangeReadCloser{RangeReader: rangeReader}, Limiter: stream.ServerDownloadLimit, }) - return nil } else { //transparent proxy header := net.ProcessHeader(r.Header, link.Header) @@ -90,10 +88,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model. 
Limiter: stream.ServerDownloadLimit, Ctx: r.Context(), }) - if err != nil { - return err - } - return nil + return err } } func attachHeader(w http.ResponseWriter, file model.Obj) { @@ -133,3 +128,29 @@ func ProxyRange(link *model.Link, size int64) { link.RangeReadCloser = nil } } + +type InterceptResponseWriter struct { + http.ResponseWriter + io.Writer +} + +func (iw *InterceptResponseWriter) Write(p []byte) (int, error) { + return iw.Writer.Write(p) +} + +type WrittenResponseWriter struct { + http.ResponseWriter + written bool +} + +func (ww *WrittenResponseWriter) Write(p []byte) (int, error) { + n, err := ww.ResponseWriter.Write(p) + if !ww.written && n > 0 { + ww.written = true + } + return n, err +} + +func (ww *WrittenResponseWriter) IsWritten() bool { + return ww.written +} diff --git a/server/handles/down.go b/server/handles/down.go index 1153881f..2c5c2faf 100644 --- a/server/handles/down.go +++ b/server/handles/down.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "net/http" stdpath "path" "strconv" "strings" @@ -129,15 +128,16 @@ func localProxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange boo if proxyRange { common.ProxyRange(link, file.GetSize()) } + Writer := &common.WrittenResponseWriter{ResponseWriter: c.Writer} //优先处理md文件 if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) { - w := c.Writer buf := bytes.NewBuffer(make([]byte, 0, file.GetSize())) - err = common.Proxy(&proxyResponseWriter{ResponseWriter: w, Writer: buf}, c.Request, link, file) + w := &common.InterceptResponseWriter{ResponseWriter: Writer, Writer: buf} + err = common.Proxy(w, c.Request, link, file) if err == nil && buf.Len() > 0 { - if w.Status() < 200 || w.Status() > 300 { - w.Write(buf.Bytes()) + if c.Writer.Status() < 200 || c.Writer.Status() > 300 { + c.Writer.Write(buf.Bytes()) return } @@ -148,19 +148,23 @@ func localProxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange boo buf.Reset() err = 
bluemonday.UGCPolicy().SanitizeReaderToWriter(&html, buf) if err == nil { - w.Header().Set("Content-Length", strconv.FormatInt(int64(buf.Len()), 10)) - w.Header().Set("Content-Type", "text/html; charset=utf-8") - _, err = utils.CopyWithBuffer(c.Writer, buf) + Writer.Header().Set("Content-Length", strconv.FormatInt(int64(buf.Len()), 10)) + Writer.Header().Set("Content-Type", "text/html; charset=utf-8") + _, err = utils.CopyWithBuffer(Writer, buf) } } } } else { - err = common.Proxy(c.Writer, c.Request, link, file) + err = common.Proxy(Writer, c.Request, link, file) } - if err != nil { - common.ErrorResp(c, err, 500, true) + if err == nil { return } + if Writer.IsWritten() { + log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err) + } else { + common.ErrorResp(c, err, 500, true) + } } // TODO need optimize @@ -182,12 +186,3 @@ func canProxy(storage driver.Driver, filename string) bool { } return false } - -type proxyResponseWriter struct { - http.ResponseWriter - io.Writer -} - -func (pw *proxyResponseWriter) Write(p []byte) (int, error) { - return pw.Writer.Write(p) -} diff --git a/server/webdav/webdav.go b/server/webdav/webdav.go index 1b7ec6ff..f22e15aa 100644 --- a/server/webdav/webdav.go +++ b/server/webdav/webdav.go @@ -24,7 +24,6 @@ import ( "github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" - log "github.com/sirupsen/logrus" ) type Handler struct { @@ -59,7 +58,11 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { status, err = h.handleOptions(brw, r) case "GET", "HEAD", "POST": useBufferedWriter = false - status, err = h.handleGetHeadPost(w, r) + Writer := &common.WrittenResponseWriter{ResponseWriter: w} + status, err = h.handleGetHeadPost(Writer, r) + if status != 0 && Writer.IsWritten() { + status = 0 + } case "DELETE": status, err = h.handleDelete(brw, r) case "PUT": @@ -247,8 +250,7 @@ func (h *Handler) 
handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta } err = common.Proxy(w, r, link, fi) if err != nil { - log.Errorf("webdav proxy error: %+v", err) - return http.StatusInternalServerError, err + return http.StatusInternalServerError, fmt.Errorf("webdav proxy error: %+v", err) } } else if storage.GetStorage().WebdavProxy() && downProxyUrl != "" { u := fmt.Sprintf("%s%s?sign=%s", From 4f5cabc725e3db6e143aae0008753325c74ee44d Mon Sep 17 00:00:00 2001 From: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Date: Sat, 12 Apr 2025 17:02:51 +0800 Subject: [PATCH 165/187] feat: add h2c for http server (#8294) * feat: add h2c for http server * chore(config): add EnableH2c option --- cmd/server.go | 16 +++++++++++----- go.mod | 2 +- go.sum | 2 ++ internal/conf/config.go | 1 + 4 files changed, 15 insertions(+), 6 deletions(-) diff --git a/cmd/server.go b/cmd/server.go index d9206cfe..4263f020 100644 --- a/cmd/server.go +++ b/cmd/server.go @@ -4,9 +4,6 @@ import ( "context" "errors" "fmt" - ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" - "github.com/KirCute/sftpd-alist" - "github.com/alist-org/alist/v3/internal/fs" "net" "net/http" "os" @@ -16,14 +13,19 @@ import ( "syscall" "time" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "github.com/KirCute/sftpd-alist" "github.com/alist-org/alist/v3/cmd/flags" "github.com/alist-org/alist/v3/internal/bootstrap" "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server" "github.com/gin-gonic/gin" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" ) // ServerCmd represents the server command @@ -47,11 +49,15 @@ the address is defined in config file`, r := gin.New() r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out)) server.Init(r) + var httpHandler http.Handler = r + if 
conf.Conf.Scheme.EnableH2c { + httpHandler = h2c.NewHandler(r, &http2.Server{}) + } var httpSrv, httpsSrv, unixSrv *http.Server if conf.Conf.Scheme.HttpPort != -1 { httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort) utils.Log.Infof("start HTTP server @ %s", httpBase) - httpSrv = &http.Server{Addr: httpBase, Handler: r} + httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler} go func() { err := httpSrv.ListenAndServe() if err != nil && !errors.Is(err, http.ErrServerClosed) { @@ -72,7 +78,7 @@ the address is defined in config file`, } if conf.Conf.Scheme.UnixFile != "" { utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile) - unixSrv = &http.Server{Handler: r} + unixSrv = &http.Server{Handler: httpHandler} go func() { listener, err := net.Listen("unix", conf.Conf.Scheme.UnixFile) if err != nil { diff --git a/go.mod b/go.mod index 97a477d3..e8afe0e7 100644 --- a/go.mod +++ b/go.mod @@ -68,7 +68,7 @@ require ( golang.org/x/crypto v0.36.0 golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e golang.org/x/image v0.19.0 - golang.org/x/net v0.37.0 + golang.org/x/net v0.38.0 golang.org/x/oauth2 v0.22.0 golang.org/x/time v0.8.0 google.golang.org/appengine v1.6.8 diff --git a/go.sum b/go.sum index 86fb779e..6fbaeb2b 100644 --- a/go.sum +++ b/go.sum @@ -741,6 +741,8 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/internal/conf/config.go b/internal/conf/config.go index 1766ae84..cdb86fee 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -35,6 +35,7 @@ type Scheme struct { KeyFile string `json:"key_file" env:"KEY_FILE"` UnixFile string `json:"unix_file" env:"UNIX_FILE"` UnixFilePerm string `json:"unix_file_perm" env:"UNIX_FILE_PERM"` + EnableH2c bool `json:"enable_h2c" env:"ENABLE_H2C"` } type LogConfig struct { From 544a7ea022ad79769d6988141304f47183c69d5e Mon Sep 17 00:00:00 2001 From: Dgs <47767754+dgscyg@users.noreply.github.com> Date: Sat, 12 Apr 2025 17:03:58 +0800 Subject: [PATCH 166/187] fix(pikpak&pikpak_share): fix WebPackageName (#8305) --- drivers/pikpak/util.go | 2 +- drivers/pikpak_share/util.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/pikpak/util.go b/drivers/pikpak/util.go index f88f085c..4cb3fbc3 100644 --- a/drivers/pikpak/util.go +++ b/drivers/pikpak/util.go @@ -84,7 +84,7 @@ const ( WebClientID = "YUMx5nI8ZU8Ap8pm" WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg" WebClientVersion = "2.0.0" - WebPackageName = "drive.mypikpak.com" + WebPackageName = "mypikpak.com" WebSdkVersion = "8.0.3" PCClientID = "YvtoWO6GNHiuCl7x" PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA" diff --git a/drivers/pikpak_share/util.go b/drivers/pikpak_share/util.go index 4111779f..2980069e 100644 --- a/drivers/pikpak_share/util.go +++ b/drivers/pikpak_share/util.go @@ -67,7 +67,7 @@ const ( WebClientID = "YUMx5nI8ZU8Ap8pm" WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg" WebClientVersion = "2.0.0" - WebPackageName = "drive.mypikpak.com" + WebPackageName = "mypikpak.com" WebSdkVersion = "8.0.3" PCClientID = "YvtoWO6GNHiuCl7x" PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA" From d0ee90cd115503151280c22b9f46e71c35df71dc Mon Sep 17 00:00:00 2001 From: Dgs 
<47767754+dgscyg@users.noreply.github.com> Date: Sat, 12 Apr 2025 17:05:58 +0800 Subject: [PATCH 167/187] fix(thunder): fix login issue (#8342 close #8288) --- drivers/thunder/driver.go | 132 +++++++++++++++++++++++++++++++------- drivers/thunder/meta.go | 14 ++-- drivers/thunder/types.go | 104 ++++++++++++++++++++++++++++-- drivers/thunder/util.go | 88 +++++++++++++++++++++++-- 4 files changed, 304 insertions(+), 34 deletions(-) diff --git a/drivers/thunder/driver.go b/drivers/thunder/driver.go index 51396ee8..1d2f2a81 100644 --- a/drivers/thunder/driver.go +++ b/drivers/thunder/driver.go @@ -45,26 +45,29 @@ func (x *Thunder) Init(ctx context.Context) (err error) { Common: &Common{ client: base.NewRestyClient(), Algorithms: []string{ - "HPxr4BVygTQVtQkIMwQH33ywbgYG5l4JoR", - "GzhNkZ8pOBsCY+7", - "v+l0ImTpG7c7/", - "e5ztohgVXNP", - "t", - "EbXUWyVVqQbQX39Mbjn2geok3/0WEkAVxeqhtx857++kjJiRheP8l77gO", - "o7dvYgbRMOpHXxCs", - "6MW8TD8DphmakaxCqVrfv7NReRRN7ck3KLnXBculD58MvxjFRqT+", - "kmo0HxCKVfmxoZswLB4bVA/dwqbVAYghSb", - "j", - "4scKJNdd7F27Hv7tbt", + "9uJNVj/wLmdwKrJaVj/omlQ", + "Oz64Lp0GigmChHMf/6TNfxx7O9PyopcczMsnf", + "Eb+L7Ce+Ej48u", + "jKY0", + "ASr0zCl6v8W4aidjPK5KHd1Lq3t+vBFf41dqv5+fnOd", + "wQlozdg6r1qxh0eRmt3QgNXOvSZO6q/GXK", + "gmirk+ciAvIgA/cxUUCema47jr/YToixTT+Q6O", + "5IiCoM9B1/788ntB", + "P07JH0h6qoM6TSUAK2aL9T5s2QBVeY9JWvalf", + "+oK0AN", }, - DeviceID: utils.GetMD5EncodeStr(x.Username + x.Password), + DeviceID: func() string { + if len(x.DeviceID) != 32 { + return utils.GetMD5EncodeStr(x.DeviceID) + } + return x.DeviceID + }(), ClientID: "Xp6vsxz_7IYVw2BB", ClientSecret: "Xp6vsy4tN9toTVdMSpomVdXpRmES", - ClientVersion: "7.51.0.8196", + ClientVersion: "8.31.0.9726", PackageName: "com.xunlei.downloadprovider", - UserAgent: "ANDROID-com.xunlei.downloadprovider/7.51.0.8196 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/220200 Oauth2Client/0.9 (Linux 
4_14_186-perf-gddfs8vbb238b) (JAVA 0)", + UserAgent: "ANDROID-com.xunlei.downloadprovider/8.31.0.9726 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/512000 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)", DownloadUserAgent: "Dalvik/2.1.0 (Linux; U; Android 12; M2004J7AC Build/SP1A.210812.016)", - refreshCTokenCk: func(token string) { x.CaptchaToken = token op.MustSaveDriverStorage(x) @@ -80,6 +83,8 @@ func (x *Thunder) Init(ctx context.Context) (err error) { x.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error())) op.MustSaveDriverStorage(x) } + // 清空 信任密钥 + x.Addition.CreditKey = "" } x.SetTokenResp(token) return err @@ -93,6 +98,17 @@ func (x *Thunder) Init(ctx context.Context) (err error) { x.SetCaptchaToken(ctoekn) } + if x.Addition.CreditKey != "" { + x.SetCreditKey(x.Addition.CreditKey) + } + + if x.Addition.DeviceID != "" { + x.Common.DeviceID = x.Addition.DeviceID + } else { + x.Addition.DeviceID = x.Common.DeviceID + op.MustSaveDriverStorage(x) + } + // 防止重复登录 identity := x.GetIdentity() if x.identity != identity || !x.IsLogin() { @@ -102,6 +118,8 @@ func (x *Thunder) Init(ctx context.Context) (err error) { if err != nil { return err } + // 清空 信任密钥 + x.Addition.CreditKey = "" x.SetTokenResp(token) } return nil @@ -161,6 +179,17 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) { x.SetCaptchaToken(x.CaptchaToken) } + if x.ExpertAddition.CreditKey != "" { + x.SetCreditKey(x.ExpertAddition.CreditKey) + } + + if x.ExpertAddition.DeviceID != "" { + x.Common.DeviceID = x.ExpertAddition.DeviceID + } else { + x.ExpertAddition.DeviceID = x.Common.DeviceID + op.MustSaveDriverStorage(x) + } + // 签名方法 if x.SignType == "captcha_sign" { x.Common.Timestamp = x.Timestamp @@ -194,6 +223,8 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) { if err != nil { return err } + // 清空 信任密钥 + x.ExpertAddition.CreditKey = "" x.SetTokenResp(token) 
x.SetRefreshTokenFunc(func() error { token, err := x.XunLeiCommon.RefreshToken(x.TokenResp.RefreshToken) @@ -202,6 +233,8 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) { if err != nil { x.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error())) } + // 清空 信任密钥 + x.ExpertAddition.CreditKey = "" } x.SetTokenResp(token) op.MustSaveDriverStorage(x) @@ -233,7 +266,8 @@ func (x *ThunderExpert) SetTokenResp(token *TokenResp) { type XunLeiCommon struct { *Common - *TokenResp // 登录信息 + *TokenResp // 登录信息 + *CoreLoginResp // core登录信息 refreshTokenFunc func() error } @@ -433,6 +467,10 @@ func (xc *XunLeiCommon) SetTokenResp(tr *TokenResp) { xc.TokenResp = tr } +func (xc *XunLeiCommon) SetCoreTokenResp(tr *CoreLoginResp) { + xc.CoreLoginResp = tr +} + // 携带Authorization和CaptchaToken的请求 func (xc *XunLeiCommon) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { data, err := xc.Common.Request(url, method, func(req *resty.Request) { @@ -461,7 +499,7 @@ func (xc *XunLeiCommon) Request(url string, method string, callback base.ReqCall } return nil, err case 9: // 验证码token过期 - if err = xc.RefreshCaptchaTokenAtLogin(GetAction(method, url), xc.UserID); err != nil { + if err = xc.RefreshCaptchaTokenAtLogin(GetAction(method, url), xc.TokenResp.UserID); err != nil { return nil, err } default: @@ -493,20 +531,25 @@ func (xc *XunLeiCommon) RefreshToken(refreshToken string) (*TokenResp, error) { // 登录 func (xc *XunLeiCommon) Login(username, password string) (*TokenResp, error) { - url := XLUSER_API_URL + "/auth/signin" - err := xc.RefreshCaptchaTokenInLogin(GetAction(http.MethodPost, url), username) + //v3 login拿到 sessionID + sessionID, err := xc.CoreLogin(username, password) if err != nil { return nil, err } + //v1 login拿到令牌 + url := XLUSER_API_URL + "/auth/signin/token" + if err = xc.RefreshCaptchaTokenInLogin(GetAction(http.MethodPost, url), username); err != nil { + return nil, err + } var resp TokenResp _, err = 
xc.Common.Request(url, http.MethodPost, func(req *resty.Request) { + req.SetPathParam("client_id", xc.ClientID) req.SetBody(&SignInRequest{ - CaptchaToken: xc.GetCaptchaToken(), ClientID: xc.ClientID, ClientSecret: xc.ClientSecret, - Username: username, - Password: password, + Provider: SignProvider, + SigninToken: sessionID, }) }, &resp) if err != nil { @@ -582,3 +625,48 @@ func (xc *XunLeiCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string } return nil } + +func (xc *XunLeiCommon) CoreLogin(username string, password string) (sessionID string, err error) { + url := XLUSER_API_BASE_URL + "/xluser.core.login/v3/login" + var resp CoreLoginResp + res, err := xc.Common.Request(url, http.MethodPost, func(req *resty.Request) { + req.SetHeader("User-Agent", "android-ok-http-client/xl-acc-sdk/version-5.0.12.512000") + req.SetBody(&CoreLoginRequest{ + ProtocolVersion: "301", + SequenceNo: "1000012", + PlatformVersion: "10", + IsCompressed: "0", + Appid: APPID, + ClientVersion: "8.31.0.9726", + PeerID: "00000000000000000000000000000000", + AppName: "ANDROID-com.xunlei.downloadprovider", + SdkVersion: "512000", + Devicesign: generateDeviceSign(xc.DeviceID, xc.PackageName), + NetWorkType: "WIFI", + ProviderName: "NONE", + DeviceModel: "M2004J7AC", + DeviceName: "Xiaomi_M2004j7ac", + OSVersion: "12", + Creditkey: xc.GetCreditKey(), + Hl: "zh-CN", + UserName: username, + PassWord: password, + VerifyKey: "", + VerifyCode: "", + IsMd5Pwd: "0", + }) + }, nil) + if err != nil { + return "", err + } + + if err = utils.Json.Unmarshal(res, &resp); err != nil { + return "", err + } + + xc.SetCoreTokenResp(&resp) + + sessionID = resp.SessionID + + return sessionID, nil +} diff --git a/drivers/thunder/meta.go b/drivers/thunder/meta.go index 12b01cba..5e6e2513 100644 --- a/drivers/thunder/meta.go +++ b/drivers/thunder/meta.go @@ -23,23 +23,25 @@ type ExpertAddition struct { RefreshToken string `json:"refresh_token" required:"true" help:"login type is refresh_token,this is 
required"` // 签名方法1 - Algorithms string `json:"algorithms" required:"true" help:"sign type is algorithms,this is required" default:"HPxr4BVygTQVtQkIMwQH33ywbgYG5l4JoR,GzhNkZ8pOBsCY+7,v+l0ImTpG7c7/,e5ztohgVXNP,t,EbXUWyVVqQbQX39Mbjn2geok3/0WEkAVxeqhtx857++kjJiRheP8l77gO,o7dvYgbRMOpHXxCs,6MW8TD8DphmakaxCqVrfv7NReRRN7ck3KLnXBculD58MvxjFRqT+,kmo0HxCKVfmxoZswLB4bVA/dwqbVAYghSb,j,4scKJNdd7F27Hv7tbt"` + Algorithms string `json:"algorithms" required:"true" help:"sign type is algorithms,this is required" default:"9uJNVj/wLmdwKrJaVj/omlQ,Oz64Lp0GigmChHMf/6TNfxx7O9PyopcczMsnf,Eb+L7Ce+Ej48u,jKY0,ASr0zCl6v8W4aidjPK5KHd1Lq3t+vBFf41dqv5+fnOd,wQlozdg6r1qxh0eRmt3QgNXOvSZO6q/GXK,gmirk+ciAvIgA/cxUUCema47jr/YToixTT+Q6O,5IiCoM9B1/788ntB,P07JH0h6qoM6TSUAK2aL9T5s2QBVeY9JWvalf,+oK0AN"` // 签名方法2 CaptchaSign string `json:"captcha_sign" required:"true" help:"sign type is captcha_sign,this is required"` Timestamp string `json:"timestamp" required:"true" help:"sign type is captcha_sign,this is required"` // 验证码 CaptchaToken string `json:"captcha_token"` + // 信任密钥 + CreditKey string `json:"credit_key" help:"credit key,used for login"` // 必要且影响登录,由签名决定 - DeviceID string `json:"device_id" required:"true" default:"9aa5c268e7bcfc197a9ad88e2fb330e5"` + DeviceID string `json:"device_id" default:""` ClientID string `json:"client_id" required:"true" default:"Xp6vsxz_7IYVw2BB"` ClientSecret string `json:"client_secret" required:"true" default:"Xp6vsy4tN9toTVdMSpomVdXpRmES"` - ClientVersion string `json:"client_version" required:"true" default:"7.51.0.8196"` + ClientVersion string `json:"client_version" required:"true" default:"8.31.0.9726"` PackageName string `json:"package_name" required:"true" default:"com.xunlei.downloadprovider"` //不影响登录,影响下载速度 - UserAgent string `json:"user_agent" required:"true" default:"ANDROID-com.xunlei.downloadprovider/7.51.0.8196 netWorkType/4G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/220200 
Oauth2Client/0.9 (Linux 4_14_186-perf-gdcf98eab238b) (JAVA 0)"` + UserAgent string `json:"user_agent" required:"true" default:"ANDROID-com.xunlei.downloadprovider/8.31.0.9726 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/512000 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)"` DownloadUserAgent string `json:"download_user_agent" required:"true" default:"Dalvik/2.1.0 (Linux; U; Android 12; M2004J7AC Build/SP1A.210812.016)"` //优先使用视频链接代替下载链接 @@ -74,6 +76,10 @@ type Addition struct { Username string `json:"username" required:"true"` Password string `json:"password" required:"true"` CaptchaToken string `json:"captcha_token"` + // 信任密钥 + CreditKey string `json:"credit_key" help:"credit key,used for login"` + // 登录设备ID + DeviceID string `json:"device_id" default:""` } // 登录特征,用于判断是否重新登录 diff --git a/drivers/thunder/types.go b/drivers/thunder/types.go index b7355b2a..1fe8432c 100644 --- a/drivers/thunder/types.go +++ b/drivers/thunder/types.go @@ -18,6 +18,10 @@ type ErrResp struct { } func (e *ErrResp) IsError() bool { + if e.ErrorMsg == "success" { + return false + } + return e.ErrorCode != 0 || e.ErrorMsg != "" || e.ErrorDescription != "" } @@ -61,13 +65,79 @@ func (t *TokenResp) Token() string { } type SignInRequest struct { - CaptchaToken string `json:"captcha_token"` - ClientID string `json:"client_id"` ClientSecret string `json:"client_secret"` - Username string `json:"username"` - Password string `json:"password"` + Provider string `json:"provider"` + SigninToken string `json:"signin_token"` +} + +type CoreLoginRequest struct { + ProtocolVersion string `json:"protocolVersion"` + SequenceNo string `json:"sequenceNo"` + PlatformVersion string `json:"platformVersion"` + IsCompressed string `json:"isCompressed"` + Appid string `json:"appid"` + ClientVersion string `json:"clientVersion"` + PeerID string `json:"peerID"` + AppName string `json:"appName"` + SdkVersion 
string `json:"sdkVersion"` + Devicesign string `json:"devicesign"` + NetWorkType string `json:"netWorkType"` + ProviderName string `json:"providerName"` + DeviceModel string `json:"deviceModel"` + DeviceName string `json:"deviceName"` + OSVersion string `json:"OSVersion"` + Creditkey string `json:"creditkey"` + Hl string `json:"hl"` + UserName string `json:"userName"` + PassWord string `json:"passWord"` + VerifyKey string `json:"verifyKey"` + VerifyCode string `json:"verifyCode"` + IsMd5Pwd string `json:"isMd5Pwd"` +} + +type CoreLoginResp struct { + Account string `json:"account"` + Creditkey string `json:"creditkey"` + /* Error string `json:"error"` + ErrorCode string `json:"errorCode"` + ErrorDescription string `json:"error_description"`*/ + ExpiresIn int `json:"expires_in"` + IsCompressed string `json:"isCompressed"` + IsSetPassWord string `json:"isSetPassWord"` + KeepAliveMinPeriod string `json:"keepAliveMinPeriod"` + KeepAlivePeriod string `json:"keepAlivePeriod"` + LoginKey string `json:"loginKey"` + NickName string `json:"nickName"` + PlatformVersion string `json:"platformVersion"` + ProtocolVersion string `json:"protocolVersion"` + SecureKey string `json:"secureKey"` + SequenceNo string `json:"sequenceNo"` + SessionID string `json:"sessionID"` + Timestamp string `json:"timestamp"` + UserID string `json:"userID"` + UserName string `json:"userName"` + UserNewNo string `json:"userNewNo"` + Version string `json:"version"` + /* VipList []struct { + ExpireDate string `json:"expireDate"` + IsAutoDeduct string `json:"isAutoDeduct"` + IsVip string `json:"isVip"` + IsYear string `json:"isYear"` + PayID string `json:"payId"` + PayName string `json:"payName"` + Register string `json:"register"` + Vasid string `json:"vasid"` + VasType string `json:"vasType"` + VipDayGrow string `json:"vipDayGrow"` + VipGrow string `json:"vipGrow"` + VipLevel string `json:"vipLevel"` + Icon struct { + General string `json:"general"` + Small string `json:"small"` + } `json:"icon"` + } 
`json:"vipList"`*/ } /* @@ -251,3 +321,29 @@ type Params struct { PredictSpeed string `json:"predict_speed"` PredictType string `json:"predict_type"` } + +// LoginReviewResp 登录验证响应 +type LoginReviewResp struct { + Creditkey string `json:"creditkey"` + Error string `json:"error"` + ErrorCode string `json:"errorCode"` + ErrorDesc string `json:"errorDesc"` + ErrorDescURL string `json:"errorDescUrl"` + ErrorIsRetry int `json:"errorIsRetry"` + ErrorDescription string `json:"error_description"` + IsCompressed string `json:"isCompressed"` + PlatformVersion string `json:"platformVersion"` + ProtocolVersion string `json:"protocolVersion"` + Reviewurl string `json:"reviewurl"` + SequenceNo string `json:"sequenceNo"` + UserID string `json:"userID"` + VerifyType string `json:"verifyType"` +} + +// ReviewData 验证数据 +type ReviewData struct { + Creditkey string `json:"creditkey"` + Reviewurl string `json:"reviewurl"` + Deviceid string `json:"deviceid"` + Devicesign string `json:"devicesign"` +} diff --git a/drivers/thunder/util.go b/drivers/thunder/util.go index f509e6b2..b7afe56d 100644 --- a/drivers/thunder/util.go +++ b/drivers/thunder/util.go @@ -1,8 +1,10 @@ package thunder import ( + "crypto/md5" "crypto/sha1" "encoding/hex" + "encoding/json" "fmt" "io" "net/http" @@ -15,10 +17,11 @@ import ( ) const ( - API_URL = "https://api-pan.xunlei.com/drive/v1" - FILE_API_URL = API_URL + "/files" - TASK_API_URL = API_URL + "/tasks" - XLUSER_API_URL = "https://xluser-ssl.xunlei.com/v1" + API_URL = "https://api-pan.xunlei.com/drive/v1" + FILE_API_URL = API_URL + "/files" + TASK_API_URL = API_URL + "/tasks" + XLUSER_API_BASE_URL = "https://xluser-ssl.xunlei.com" + XLUSER_API_URL = XLUSER_API_BASE_URL + "/v1" ) const ( @@ -34,6 +37,12 @@ const ( UPLOAD_TYPE_URL = "UPLOAD_TYPE_URL" ) +const ( + SignProvider = "access_end_point_token" + APPID = "40" + APPKey = "34a062aaa22f906fca4fefe9fb3a3021" +) + func GetAction(method string, url string) string { urlpath := 
regexp.MustCompile(`://[^/]+((/[^/\s?#]+)*)`).FindStringSubmatch(url)[1] return method + ":" + urlpath @@ -44,6 +53,8 @@ type Common struct { captchaToken string + creditKey string + // 签名相关,二选一 Algorithms []string Timestamp, CaptchaSign string @@ -69,6 +80,13 @@ func (c *Common) GetCaptchaToken() string { return c.captchaToken } +func (c *Common) SetCreditKey(creditKey string) { + c.creditKey = creditKey +} +func (c *Common) GetCreditKey() string { + return c.creditKey +} + // 刷新验证码token(登录后) func (c *Common) RefreshCaptchaTokenAtLogin(action, userID string) error { metas := map[string]string{ @@ -170,12 +188,53 @@ func (c *Common) Request(url, method string, callback base.ReqCallback, resp int var erron ErrResp utils.Json.Unmarshal(res.Body(), &erron) if erron.IsError() { + // review_panel 表示需要短信验证码进行验证 + if erron.ErrorMsg == "review_panel" { + return nil, c.getReviewData(res) + } + return nil, &erron } return res.Body(), nil } +// 获取验证所需内容 +func (c *Common) getReviewData(res *resty.Response) error { + var reviewResp LoginReviewResp + var reviewData ReviewData + + if err := utils.Json.Unmarshal(res.Body(), &reviewResp); err != nil { + return err + } + + deviceSign := generateDeviceSign(c.DeviceID, c.PackageName) + + reviewData = ReviewData{ + Creditkey: reviewResp.Creditkey, + Reviewurl: reviewResp.Reviewurl + "&deviceid=" + deviceSign, + Deviceid: deviceSign, + Devicesign: deviceSign, + } + + // 将reviewData转为JSON字符串 + reviewDataJSON, _ := json.MarshalIndent(reviewData, "", " ") + //reviewDataJSON, _ := json.Marshal(reviewData) + + return fmt.Errorf(` +
+ 🔒 本次登录需要验证
+ This login requires verification + +

下面是验证所需要的数据,具体使用方法请参照对应的驱动文档
+ Below are the relevant verification data. For specific usage methods, please refer to the corresponding driver documentation.

+
+
%s
+
+
`, string(reviewDataJSON)) +} + // 计算文件Gcid func getGcid(r io.Reader, size int64) (string, error) { calcBlockSize := func(j int64) int64 { @@ -201,3 +260,24 @@ func getGcid(r io.Reader, size int64) (string, error) { } return hex.EncodeToString(hash1.Sum(nil)), nil } + +func generateDeviceSign(deviceID, packageName string) string { + + signatureBase := fmt.Sprintf("%s%s%s%s", deviceID, packageName, APPID, APPKey) + + sha1Hash := sha1.New() + sha1Hash.Write([]byte(signatureBase)) + sha1Result := sha1Hash.Sum(nil) + + sha1String := hex.EncodeToString(sha1Result) + + md5Hash := md5.New() + md5Hash.Write([]byte(sha1String)) + md5Result := md5Hash.Sum(nil) + + md5String := hex.EncodeToString(md5Result) + + deviceSign := fmt.Sprintf("div101.%s%s", deviceID, md5String) + + return deviceSign +} From c8470b9a2a08276e7e8517caa70e6d10812a9978 Mon Sep 17 00:00:00 2001 From: Yifan Gao Date: Sat, 12 Apr 2025 17:09:46 +0800 Subject: [PATCH 168/187] fix(fs): remove old target object from cache before updating (#8352) --- internal/op/fs.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/internal/op/fs.go b/internal/op/fs.go index 01727e75..99c2fe34 100644 --- a/internal/op/fs.go +++ b/internal/op/fs.go @@ -3,6 +3,7 @@ package op import ( "context" stdpath "path" + "slices" "time" "github.com/Xhofe/go-cache" @@ -25,6 +26,12 @@ func updateCacheObj(storage driver.Driver, path string, oldObj model.Obj, newObj key := Key(storage, path) objs, ok := listCache.Get(key) if ok { + for i, obj := range objs { + if obj.GetName() == newObj.GetName() { + objs = slices.Delete(objs, i, i+1) + break + } + } for i, obj := range objs { if obj.GetName() == oldObj.GetName() { objs[i] = newObj From f0b1aeaf8d846b3aee41fed29bf03ad7afa4e72f Mon Sep 17 00:00:00 2001 From: asdfghjkl <61342682+anobodys@users.noreply.github.com> Date: Sat, 12 Apr 2025 17:12:40 +0800 Subject: [PATCH 169/187] feat(doubao): support upload (#8302 close #8335) * feat(doubao): support upload * fix(doubao): fix file list 
cursor * fix: handle strconv.Atoi err Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: anobodys Co-authored-by: Andy Hsu Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- drivers/doubao/driver.go | 133 ++++-- drivers/doubao/meta.go | 5 +- drivers/doubao/types.go | 353 ++++++++++++++- drivers/doubao/util.go | 950 ++++++++++++++++++++++++++++++++++++++- 4 files changed, 1396 insertions(+), 45 deletions(-) diff --git a/drivers/doubao/driver.go b/drivers/doubao/driver.go index 04f74325..a066feee 100644 --- a/drivers/doubao/driver.go +++ b/drivers/doubao/driver.go @@ -3,19 +3,25 @@ package doubao import ( "context" "errors" - "time" - "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" "github.com/google/uuid" + "net/http" + "strconv" + "strings" + "time" ) type Doubao struct { model.Storage Addition + *UploadToken + UserId string + uploadThread int } func (d *Doubao) Config() driver.Config { @@ -29,6 +35,31 @@ func (d *Doubao) GetAddition() driver.Additional { func (d *Doubao) Init(ctx context.Context) error { // TODO login / refresh token //op.MustSaveDriverStorage(d) + uploadThread, err := strconv.Atoi(d.UploadThread) + if err != nil || uploadThread < 1 { + d.uploadThread, d.UploadThread = 3, "3" // Set default value + } else { + d.uploadThread = uploadThread + } + + if d.UserId == "" { + userInfo, err := d.getUserInfo() + if err != nil { + return err + } + + d.UserId = strconv.FormatInt(userInfo.UserID, 10) + } + + if d.UploadToken == nil { + uploadToken, err := d.initUploadToken() + if err != nil { + return err + } + + d.UploadToken = uploadToken + } + return nil } @@ -38,18 +69,12 @@ func (d *Doubao) Drop(ctx context.Context) error { func (d *Doubao) List(ctx context.Context, dir model.Obj, 
args model.ListArgs) ([]model.Obj, error) { var files []model.Obj - var r NodeInfoResp - _, err := d.request("/samantha/aispace/node_info", "POST", func(req *resty.Request) { - req.SetBody(base.Json{ - "node_id": dir.GetID(), - "need_full_path": false, - }) - }, &r) + fileList, err := d.getFiles(dir.GetID(), "") if err != nil { return nil, err } - for _, child := range r.Data.Children { + for _, child := range fileList { files = append(files, &Object{ Object: model.Object{ ID: child.ID, @@ -60,34 +85,65 @@ func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ( Ctime: time.Unix(child.CreateTime, 0), IsFolder: child.NodeType == 1, }, - Key: child.Key, + Key: child.Key, + NodeType: child.NodeType, }) } + return files, nil } func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + var downloadUrl string + if u, ok := file.(*Object); ok { - var r GetFileUrlResp - _, err := d.request("/alice/message/get_file_url", "POST", func(req *resty.Request) { - req.SetBody(base.Json{ - "uris": []string{u.Key}, - "type": "file", - }) - }, &r) - if err != nil { - return nil, err + switch u.NodeType { + case VideoType, AudioType: + var r GetVideoFileUrlResp + _, err := d.request("/samantha/media/get_play_info", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "key": u.Key, + "node_id": file.GetID(), + }) + }, &r) + if err != nil { + return nil, err + } + + downloadUrl = r.Data.OriginalMediaInfo.MainURL + default: + var r GetFileUrlResp + _, err := d.request("/alice/message/get_file_url", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{u.Key}, + "type": FileNodeType[u.NodeType], + }) + }, &r) + if err != nil { + return nil, err + } + + downloadUrl = r.Data.FileUrls[0].MainURL } + + // 生成标准的Content-Disposition + contentDisposition := generateContentDisposition(u.Name) + return &model.Link{ - URL: r.Data.FileUrls[0].MainURL, + URL: downloadUrl, + Header: 
http.Header{ + "User-Agent": []string{UserAgent}, + "Content-Disposition": []string{contentDisposition}, + }, }, nil } + return nil, errors.New("can't convert obj to URL") } func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { var r UploadNodeResp - _, err := d.request("/samantha/aispace/upload_node", "POST", func(req *resty.Request) { + _, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "node_list": []base.Json{ { @@ -104,7 +160,7 @@ func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error { var r UploadNodeResp - _, err := d.request("/samantha/aispace/move_node", "POST", func(req *resty.Request) { + _, err := d.request("/samantha/aispace/move_node", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "node_list": []base.Json{ {"id": srcObj.GetID()}, @@ -118,7 +174,7 @@ func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error { func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error { var r BaseResp - _, err := d.request("/samantha/aispace/rename_node", "POST", func(req *resty.Request) { + _, err := d.request("/samantha/aispace/rename_node", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "node_id": srcObj.GetID(), "node_name": newName, @@ -134,15 +190,38 @@ func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error { var r BaseResp - _, err := d.request("/samantha/aispace/delete_node", "POST", func(req *resty.Request) { + _, err := d.request("/samantha/aispace/delete_node", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}}) }, &r) return err } func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up 
driver.UpdateProgress) (model.Obj, error) { - // TODO upload file, optional - return nil, errs.NotImplement + // 根据MIME类型确定数据类型 + mimetype := file.GetMimetype() + dataType := FileDataType + + switch { + case strings.HasPrefix(mimetype, "video/"): + dataType = VideoDataType + case strings.HasPrefix(mimetype, "audio/"): + dataType = VideoDataType // 音频与视频使用相同的处理方式 + case strings.HasPrefix(mimetype, "image/"): + dataType = ImgDataType + } + + // 获取上传配置 + uploadConfig := UploadConfig{} + if err := d.getUploadConfig(&uploadConfig, dataType, file); err != nil { + return nil, err + } + + // 根据文件大小选择上传方式 + if file.GetSize() <= 1*utils.MB { // 小于1MB,使用普通模式上传 + return d.Upload(&uploadConfig, dstDir, file, up, dataType) + } + // 大文件使用分片上传 + return d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType) } func (d *Doubao) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { diff --git a/drivers/doubao/meta.go b/drivers/doubao/meta.go index bb9e3f25..c3d8eb34 100644 --- a/drivers/doubao/meta.go +++ b/drivers/doubao/meta.go @@ -10,7 +10,8 @@ type Addition struct { // driver.RootPath driver.RootID // define other - Cookie string `json:"cookie" type:"text"` + Cookie string `json:"cookie" type:"text"` + UploadThread string `json:"upload_thread" default:"3"` } var config = driver.Config{ @@ -19,7 +20,7 @@ var config = driver.Config{ OnlyLocal: false, OnlyProxy: false, NoCache: false, - NoUpload: true, + NoUpload: false, NeedMs: false, DefaultRoot: "0", CheckStatus: false, diff --git a/drivers/doubao/types.go b/drivers/doubao/types.go index 2dc5a61d..4264eb7d 100644 --- a/drivers/doubao/types.go +++ b/drivers/doubao/types.go @@ -1,6 +1,11 @@ package doubao -import "github.com/alist-org/alist/v3/internal/model" +import ( + "encoding/json" + "fmt" + "github.com/alist-org/alist/v3/internal/model" + "time" +) type BaseResp struct { Code int `json:"code"` @@ -10,14 +15,14 @@ type BaseResp struct { type 
NodeInfoResp struct { BaseResp Data struct { - NodeInfo NodeInfo `json:"node_info"` - Children []NodeInfo `json:"children"` - NextCursor string `json:"next_cursor"` - HasMore bool `json:"has_more"` + NodeInfo File `json:"node_info"` + Children []File `json:"children"` + NextCursor string `json:"next_cursor"` + HasMore bool `json:"has_more"` } `json:"data"` } -type NodeInfo struct { +type File struct { ID string `json:"id"` Name string `json:"name"` Key string `json:"key"` @@ -44,6 +49,39 @@ type GetFileUrlResp struct { } `json:"data"` } +type GetVideoFileUrlResp struct { + BaseResp + Data struct { + MediaType string `json:"media_type"` + MediaInfo []struct { + Meta struct { + Height string `json:"height"` + Width string `json:"width"` + Format string `json:"format"` + Duration float64 `json:"duration"` + CodecType string `json:"codec_type"` + Definition string `json:"definition"` + } `json:"meta"` + MainURL string `json:"main_url"` + BackupURL string `json:"backup_url"` + } `json:"media_info"` + OriginalMediaInfo struct { + Meta struct { + Height string `json:"height"` + Width string `json:"width"` + Format string `json:"format"` + Duration float64 `json:"duration"` + CodecType string `json:"codec_type"` + Definition string `json:"definition"` + } `json:"meta"` + MainURL string `json:"main_url"` + BackupURL string `json:"backup_url"` + } `json:"original_media_info"` + PosterURL string `json:"poster_url"` + PlayableStatus int `json:"playable_status"` + } `json:"data"` +} + type UploadNodeResp struct { BaseResp Data struct { @@ -60,5 +98,306 @@ type UploadNodeResp struct { type Object struct { model.Object - Key string + Key string + NodeType int +} + +type UserInfoResp struct { + Data UserInfo `json:"data"` + Message string `json:"message"` +} +type AppUserInfo struct { + BuiAuditInfo string `json:"bui_audit_info"` +} +type AuditInfo struct { +} +type Details struct { +} +type BuiAuditInfo struct { + AuditInfo AuditInfo `json:"audit_info"` + IsAuditing bool 
`json:"is_auditing"` + AuditStatus int `json:"audit_status"` + LastUpdateTime int `json:"last_update_time"` + UnpassReason string `json:"unpass_reason"` + Details Details `json:"details"` +} +type Connects struct { + Platform string `json:"platform"` + ProfileImageURL string `json:"profile_image_url"` + ExpiredTime int `json:"expired_time"` + ExpiresIn int `json:"expires_in"` + PlatformScreenName string `json:"platform_screen_name"` + UserID int64 `json:"user_id"` + PlatformUID string `json:"platform_uid"` + SecPlatformUID string `json:"sec_platform_uid"` + PlatformAppID int `json:"platform_app_id"` + ModifyTime int `json:"modify_time"` + AccessToken string `json:"access_token"` + OpenID string `json:"open_id"` +} +type OperStaffRelationInfo struct { + HasPassword int `json:"has_password"` + Mobile string `json:"mobile"` + SecOperStaffUserID string `json:"sec_oper_staff_user_id"` + RelationMobileCountryCode int `json:"relation_mobile_country_code"` +} +type UserInfo struct { + AppID int `json:"app_id"` + AppUserInfo AppUserInfo `json:"app_user_info"` + AvatarURL string `json:"avatar_url"` + BgImgURL string `json:"bg_img_url"` + BuiAuditInfo BuiAuditInfo `json:"bui_audit_info"` + CanBeFoundByPhone int `json:"can_be_found_by_phone"` + Connects []Connects `json:"connects"` + CountryCode int `json:"country_code"` + Description string `json:"description"` + DeviceID int `json:"device_id"` + Email string `json:"email"` + EmailCollected bool `json:"email_collected"` + Gender int `json:"gender"` + HasPassword int `json:"has_password"` + HmRegion int `json:"hm_region"` + IsBlocked int `json:"is_blocked"` + IsBlocking int `json:"is_blocking"` + IsRecommendAllowed int `json:"is_recommend_allowed"` + IsVisitorAccount bool `json:"is_visitor_account"` + Mobile string `json:"mobile"` + Name string `json:"name"` + NeedCheckBindStatus bool `json:"need_check_bind_status"` + OdinUserType int `json:"odin_user_type"` + OperStaffRelationInfo OperStaffRelationInfo 
`json:"oper_staff_relation_info"` + PhoneCollected bool `json:"phone_collected"` + RecommendHintMessage string `json:"recommend_hint_message"` + ScreenName string `json:"screen_name"` + SecUserID string `json:"sec_user_id"` + SessionKey string `json:"session_key"` + UseHmRegion bool `json:"use_hm_region"` + UserCreateTime int `json:"user_create_time"` + UserID int64 `json:"user_id"` + UserIDStr string `json:"user_id_str"` + UserVerified bool `json:"user_verified"` + VerifiedContent string `json:"verified_content"` +} + +// UploadToken 上传令牌配置 +type UploadToken struct { + Alice map[string]UploadAuthToken + Samantha MediaUploadAuthToken +} + +// UploadAuthToken 多种类型的上传配置:图片/文件 +type UploadAuthToken struct { + ServiceID string `json:"service_id"` + UploadPathPrefix string `json:"upload_path_prefix"` + Auth struct { + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + SessionToken string `json:"session_token"` + ExpiredTime time.Time `json:"expired_time"` + CurrentTime time.Time `json:"current_time"` + } `json:"auth"` + UploadHost string `json:"upload_host"` +} + +// MediaUploadAuthToken 媒体上传配置 +type MediaUploadAuthToken struct { + StsToken struct { + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + SessionToken string `json:"session_token"` + ExpiredTime time.Time `json:"expired_time"` + CurrentTime time.Time `json:"current_time"` + } `json:"sts_token"` + UploadInfo struct { + VideoHost string `json:"video_host"` + SpaceName string `json:"space_name"` + } `json:"upload_info"` +} + +type UploadAuthTokenResp struct { + BaseResp + Data UploadAuthToken `json:"data"` +} + +type MediaUploadAuthTokenResp struct { + BaseResp + Data MediaUploadAuthToken `json:"data"` +} + +type ResponseMetadata struct { + RequestID string `json:"RequestId"` + Action string `json:"Action"` + Version string `json:"Version"` + Service string `json:"Service"` + Region string `json:"Region"` + Error struct { 
+ CodeN int `json:"CodeN,omitempty"` + Code string `json:"Code,omitempty"` + Message string `json:"Message,omitempty"` + } `json:"Error,omitempty"` +} + +type UploadConfig struct { + UploadAddress UploadAddress `json:"UploadAddress"` + FallbackUploadAddress FallbackUploadAddress `json:"FallbackUploadAddress"` + InnerUploadAddress InnerUploadAddress `json:"InnerUploadAddress"` + RequestID string `json:"RequestId"` + SDKParam interface{} `json:"SDKParam"` +} + +type UploadConfigResp struct { + ResponseMetadata `json:"ResponseMetadata"` + Result UploadConfig `json:"Result"` +} + +// StoreInfo 存储信息 +type StoreInfo struct { + StoreURI string `json:"StoreUri"` + Auth string `json:"Auth"` + UploadID string `json:"UploadID"` + UploadHeader map[string]interface{} `json:"UploadHeader,omitempty"` + StorageHeader map[string]interface{} `json:"StorageHeader,omitempty"` +} + +// UploadAddress 上传地址信息 +type UploadAddress struct { + StoreInfos []StoreInfo `json:"StoreInfos"` + UploadHosts []string `json:"UploadHosts"` + UploadHeader map[string]interface{} `json:"UploadHeader"` + SessionKey string `json:"SessionKey"` + Cloud string `json:"Cloud"` +} + +// FallbackUploadAddress 备用上传地址 +type FallbackUploadAddress struct { + StoreInfos []StoreInfo `json:"StoreInfos"` + UploadHosts []string `json:"UploadHosts"` + UploadHeader map[string]interface{} `json:"UploadHeader"` + SessionKey string `json:"SessionKey"` + Cloud string `json:"Cloud"` +} + +// UploadNode 上传节点信息 +type UploadNode struct { + Vid string `json:"Vid"` + Vids []string `json:"Vids"` + StoreInfos []StoreInfo `json:"StoreInfos"` + UploadHost string `json:"UploadHost"` + UploadHeader map[string]interface{} `json:"UploadHeader"` + Type string `json:"Type"` + Protocol string `json:"Protocol"` + SessionKey string `json:"SessionKey"` + NodeConfig struct { + UploadMode string `json:"UploadMode"` + } `json:"NodeConfig"` + Cluster string `json:"Cluster"` +} + +// AdvanceOption 高级选项 +type AdvanceOption struct { + Parallel int 
`json:"Parallel"` + Stream int `json:"Stream"` + SliceSize int `json:"SliceSize"` + EncryptionKey string `json:"EncryptionKey"` +} + +// InnerUploadAddress 内部上传地址 +type InnerUploadAddress struct { + UploadNodes []UploadNode `json:"UploadNodes"` + AdvanceOption AdvanceOption `json:"AdvanceOption"` +} + +// UploadPart 上传分片信息 +type UploadPart struct { + UploadId string `json:"uploadid,omitempty"` + PartNumber string `json:"part_number,omitempty"` + Crc32 string `json:"crc32,omitempty"` + Etag string `json:"etag,omitempty"` + Mode string `json:"mode,omitempty"` +} + +// UploadResp 上传响应体 +type UploadResp struct { + Code int `json:"code"` + ApiVersion string `json:"apiversion"` + Message string `json:"message"` + Data UploadPart `json:"data"` +} + +type VideoCommitUpload struct { + Vid string `json:"Vid"` + VideoMeta struct { + URI string `json:"Uri"` + Height int `json:"Height"` + Width int `json:"Width"` + OriginHeight int `json:"OriginHeight"` + OriginWidth int `json:"OriginWidth"` + Duration float64 `json:"Duration"` + Bitrate int `json:"Bitrate"` + Md5 string `json:"Md5"` + Format string `json:"Format"` + Size int `json:"Size"` + FileType string `json:"FileType"` + Codec string `json:"Codec"` + } `json:"VideoMeta"` + WorkflowInput struct { + TemplateID string `json:"TemplateId"` + } `json:"WorkflowInput"` + GetPosterMode string `json:"GetPosterMode"` +} + +type VideoCommitUploadResp struct { + ResponseMetadata ResponseMetadata `json:"ResponseMetadata"` + Result struct { + RequestID string `json:"RequestId"` + Results []VideoCommitUpload `json:"Results"` + } `json:"Result"` +} + +type CommonResp struct { + Code int `json:"code"` + Msg string `json:"msg,omitempty"` + Message string `json:"message,omitempty"` // 错误情况下的消息 + Data json.RawMessage `json:"data,omitempty"` // 原始数据,稍后解析 + Error *struct { + Code int `json:"code"` + Message string `json:"message"` + Locale string `json:"locale"` + } `json:"error,omitempty"` +} + +// IsSuccess 判断响应是否成功 +func (r *CommonResp) 
IsSuccess() bool { + return r.Code == 0 +} + +// GetError 获取错误信息 +func (r *CommonResp) GetError() error { + if r.IsSuccess() { + return nil + } + // 优先使用message字段 + errMsg := r.Message + if errMsg == "" { + errMsg = r.Msg + } + // 如果error对象存在且有详细消息,则使用error中的信息 + if r.Error != nil && r.Error.Message != "" { + errMsg = r.Error.Message + } + + return fmt.Errorf("[doubao] API error (code: %d): %s", r.Code, errMsg) +} + +// UnmarshalData 将data字段解析为指定类型 +func (r *CommonResp) UnmarshalData(v interface{}) error { + if !r.IsSuccess() { + return r.GetError() + } + + if len(r.Data) == 0 { + return nil + } + + return json.Unmarshal(r.Data, v) } diff --git a/drivers/doubao/util.go b/drivers/doubao/util.go index 977691c0..348c0aa0 100644 --- a/drivers/doubao/util.go +++ b/drivers/doubao/util.go @@ -1,38 +1,970 @@ package doubao import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" "errors" - + "fmt" "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/errgroup" "github.com/alist-org/alist/v3/pkg/utils" + "github.com/avast/retry-go" + "github.com/go-resty/resty/v2" + "github.com/google/uuid" log "github.com/sirupsen/logrus" + "hash/crc32" + "io" + "math" + "math/rand" + "net/http" + "net/url" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +const ( + DirectoryType = 1 + FileType = 2 + LinkType = 3 + ImageType = 4 + PagesType = 5 + VideoType = 6 + AudioType = 7 + MeetingMinutesType = 8 +) + +var FileNodeType = map[int]string{ + 1: "directory", + 2: "file", + 3: "link", + 4: "image", + 5: "pages", + 6: "video", + 7: "audio", + 8: "meeting_minutes", +} + +const ( + BaseURL = "https://www.doubao.com" + FileDataType = "file" + ImgDataType = "image" + VideoDataType = "video" + DefaultChunkSize = int64(5 * 1024 * 1024) // 5MB + MaxRetryAttempts = 3 // 最大重试次数 + UserAgent = "Mozilla/5.0 (Windows NT 
10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36" + Region = "cn-north-1" + UploadTimeout = 3 * time.Minute ) // do others that not defined in Driver interface func (d *Doubao) request(path string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { - url := "https://www.doubao.com" + path + reqUrl := BaseURL + path req := base.RestyClient.R() req.SetHeader("Cookie", d.Cookie) if callback != nil { callback(req) } - var r BaseResp - req.SetResult(&r) - res, err := req.Execute(method, url) + + var commonResp CommonResp + + res, err := req.Execute(method, reqUrl) log.Debugln(res.String()) if err != nil { return nil, err } - // 业务状态码检查(优先于HTTP状态码) - if r.Code != 0 { - return res.Body(), errors.New(r.Msg) + body := res.Body() + // 先解析为通用响应 + if err = json.Unmarshal(body, &commonResp); err != nil { + return nil, err } + // 检查响应是否成功 + if !commonResp.IsSuccess() { + return body, commonResp.GetError() + } + if resp != nil { - err = utils.Json.Unmarshal(res.Body(), resp) + if err = json.Unmarshal(body, resp); err != nil { + return body, err + } + } + + return body, nil +} + +func (d *Doubao) getFiles(dirId, cursor string) (resp []File, err error) { + var r NodeInfoResp + + var body = base.Json{ + "node_id": dirId, + } + // 如果有游标,则设置游标和大小 + if cursor != "" { + body["cursor"] = cursor + body["size"] = 50 + } else { + body["need_full_path"] = false + } + + _, err = d.request("/samantha/aispace/node_info", http.MethodPost, func(req *resty.Request) { + req.SetBody(body) + }, &r) + if err != nil { + return nil, err + } + + if r.Data.Children != nil { + resp = r.Data.Children + } + + if r.Data.NextCursor != "-1" { + // 递归获取下一页 + nextFiles, err := d.getFiles(dirId, r.Data.NextCursor) if err != nil { return nil, err } + + resp = append(r.Data.Children, nextFiles...) 
} + + return resp, err +} + +func (d *Doubao) getUserInfo() (UserInfo, error) { + var r UserInfoResp + + _, err := d.request("/passport/account/info/v2/", http.MethodGet, nil, &r) + if err != nil { + return UserInfo{}, err + } + + return r.Data, err +} + +// 签名请求 +func (d *Doubao) signRequest(req *resty.Request, method, tokenType, uploadUrl string) error { + parsedUrl, err := url.Parse(uploadUrl) + if err != nil { + return fmt.Errorf("invalid URL format: %w", err) + } + + var accessKeyId, secretAccessKey, sessionToken string + var serviceName string + + if tokenType == VideoDataType { + accessKeyId = d.UploadToken.Samantha.StsToken.AccessKeyID + secretAccessKey = d.UploadToken.Samantha.StsToken.SecretAccessKey + sessionToken = d.UploadToken.Samantha.StsToken.SessionToken + serviceName = "vod" + } else { + accessKeyId = d.UploadToken.Alice[tokenType].Auth.AccessKeyID + secretAccessKey = d.UploadToken.Alice[tokenType].Auth.SecretAccessKey + sessionToken = d.UploadToken.Alice[tokenType].Auth.SessionToken + serviceName = "imagex" + } + + // 当前时间,格式为 ISO8601 + now := time.Now().UTC() + amzDate := now.Format("20060102T150405Z") + dateStamp := now.Format("20060102") + + req.SetHeader("X-Amz-Date", amzDate) + + if sessionToken != "" { + req.SetHeader("X-Amz-Security-Token", sessionToken) + } + + // 计算请求体的SHA256哈希 + var bodyHash string + if req.Body != nil { + bodyBytes, ok := req.Body.([]byte) + if !ok { + return fmt.Errorf("request body must be []byte") + } + + bodyHash = hashSHA256(string(bodyBytes)) + req.SetHeader("X-Amz-Content-Sha256", bodyHash) + } else { + bodyHash = hashSHA256("") + } + + // 创建规范请求 + canonicalURI := parsedUrl.Path + if canonicalURI == "" { + canonicalURI = "/" + } + + // 查询参数按照字母顺序排序 + canonicalQueryString := getCanonicalQueryString(req.QueryParam) + // 规范请求头 + canonicalHeaders, signedHeaders := getCanonicalHeadersFromMap(req.Header) + canonicalRequest := method + "\n" + + canonicalURI + "\n" + + canonicalQueryString + "\n" + + canonicalHeaders + 
"\n" + + signedHeaders + "\n" + + bodyHash + + algorithm := "AWS4-HMAC-SHA256" + credentialScope := fmt.Sprintf("%s/%s/%s/aws4_request", dateStamp, Region, serviceName) + + stringToSign := algorithm + "\n" + + amzDate + "\n" + + credentialScope + "\n" + + hashSHA256(canonicalRequest) + // 计算签名密钥 + signingKey := getSigningKey(secretAccessKey, dateStamp, Region, serviceName) + // 计算签名 + signature := hmacSHA256Hex(signingKey, stringToSign) + // 构建授权头 + authorizationHeader := fmt.Sprintf( + "%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", + algorithm, + accessKeyId, + credentialScope, + signedHeaders, + signature, + ) + + req.SetHeader("Authorization", authorizationHeader) + + return nil +} + +func (d *Doubao) requestApi(url, method, tokenType string, callback base.ReqCallback, resp interface{}) ([]byte, error) { + req := base.RestyClient.R() + req.SetHeaders(map[string]string{ + "user-agent": UserAgent, + }) + + if method == http.MethodPost { + req.SetHeader("Content-Type", "text/plain;charset=UTF-8") + } + + if callback != nil { + callback(req) + } + + if resp != nil { + req.SetResult(resp) + } + + // 使用自定义AWS SigV4签名 + err := d.signRequest(req, method, tokenType, url) + if err != nil { + return nil, err + } + + res, err := req.Execute(method, url) + if err != nil { + return nil, err + } + return res.Body(), nil } + +func (d *Doubao) initUploadToken() (*UploadToken, error) { + uploadToken := &UploadToken{ + Alice: make(map[string]UploadAuthToken), + Samantha: MediaUploadAuthToken{}, + } + + fileAuthToken, err := d.getUploadAuthToken(FileDataType) + if err != nil { + return nil, err + } + + imgAuthToken, err := d.getUploadAuthToken(ImgDataType) + if err != nil { + return nil, err + } + + mediaAuthToken, err := d.getSamantaUploadAuthToken() + if err != nil { + return nil, err + } + + uploadToken.Alice[FileDataType] = fileAuthToken + uploadToken.Alice[ImgDataType] = imgAuthToken + uploadToken.Samantha = mediaAuthToken + + return uploadToken, nil +} + +func (d 
*Doubao) getUploadAuthToken(dataType string) (ut UploadAuthToken, err error) { + var r UploadAuthTokenResp + _, err = d.request("/alice/upload/auth_token", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "scene": "bot_chat", + "data_type": dataType, + }) + }, &r) + + return r.Data, err +} + +func (d *Doubao) getSamantaUploadAuthToken() (mt MediaUploadAuthToken, err error) { + var r MediaUploadAuthTokenResp + _, err = d.request("/samantha/media/get_upload_token", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{}) + }, &r) + + return r.Data, err +} + +// getUploadConfig 获取上传配置信息 +func (d *Doubao) getUploadConfig(upConfig *UploadConfig, dataType string, file model.FileStreamer) error { + tokenType := dataType + // 配置参数函数 + configureParams := func() (string, map[string]string) { + var uploadUrl string + var params map[string]string + // 根据数据类型设置不同的上传参数 + switch dataType { + case VideoDataType: + // 音频/视频类型 - 使用uploadToken.Samantha的配置 + uploadUrl = d.UploadToken.Samantha.UploadInfo.VideoHost + params = map[string]string{ + "Action": "ApplyUploadInner", + "Version": "2020-11-19", + "SpaceName": d.UploadToken.Samantha.UploadInfo.SpaceName, + "FileType": "video", + "IsInner": "1", + "NeedFallback": "true", + "FileSize": strconv.FormatInt(file.GetSize(), 10), + "s": randomString(), + } + case ImgDataType, FileDataType: + // 图片或其他文件类型 - 使用uploadToken.Alice对应配置 + uploadUrl = "https://" + d.UploadToken.Alice[dataType].UploadHost + params = map[string]string{ + "Action": "ApplyImageUpload", + "Version": "2018-08-01", + "ServiceId": d.UploadToken.Alice[dataType].ServiceID, + "NeedFallback": "true", + "FileSize": strconv.FormatInt(file.GetSize(), 10), + "FileExtension": filepath.Ext(file.GetName()), + "s": randomString(), + } + } + return uploadUrl, params + } + + // 获取初始参数 + uploadUrl, params := configureParams() + + tokenRefreshed := false + var configResp UploadConfigResp + + err := d._retryOperation("get upload_config", func() error 
{ + configResp = UploadConfigResp{} + + _, err := d.requestApi(uploadUrl, http.MethodGet, tokenType, func(req *resty.Request) { + req.SetQueryParams(params) + }, &configResp) + if err != nil { + return err + } + + if configResp.ResponseMetadata.Error.Code == "" { + *upConfig = configResp.Result + return nil + } + + // 100028 凭证过期 + if configResp.ResponseMetadata.Error.CodeN == 100028 && !tokenRefreshed { + log.Debugln("[doubao] Upload token expired, re-fetching...") + newToken, err := d.initUploadToken() + if err != nil { + return fmt.Errorf("failed to refresh token: %w", err) + } + + d.UploadToken = newToken + tokenRefreshed = true + uploadUrl, params = configureParams() + + return retry.Error{errors.New("token refreshed, retry needed")} + } + + return fmt.Errorf("get upload_config failed: %s", configResp.ResponseMetadata.Error.Message) + }) + + return err +} + +// uploadNode 上传 文件信息 +func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file model.FileStreamer, dataType string) (UploadNodeResp, error) { + reqUuid := uuid.New().String() + var key string + var nodeType int + + mimetype := file.GetMimetype() + switch dataType { + case VideoDataType: + key = uploadConfig.InnerUploadAddress.UploadNodes[0].Vid + if strings.HasPrefix(mimetype, "audio/") { + nodeType = AudioType // 音频类型 + } else { + nodeType = VideoType // 视频类型 + } + case ImgDataType: + key = uploadConfig.InnerUploadAddress.UploadNodes[0].StoreInfos[0].StoreURI + nodeType = ImageType // 图片类型 + default: // FileDataType + key = uploadConfig.InnerUploadAddress.UploadNodes[0].StoreInfos[0].StoreURI + nodeType = FileType // 文件类型 + } + + var r UploadNodeResp + _, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "node_list": []base.Json{ + { + "local_id": reqUuid, + "parent_id": dir.GetID(), + "name": file.GetName(), + "key": key, + "node_content": base.Json{}, + "node_type": nodeType, + "size": file.GetSize(), + }, + }, + 
"request_id": reqUuid, + }) + }, &r) + + return r, err +} + +// Upload 普通上传实现 +func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) { + data, err := io.ReadAll(file) + if err != nil { + return nil, err + } + + // 计算CRC32 + crc32Hash := crc32.NewIEEE() + crc32Hash.Write(data) + crc32Value := hex.EncodeToString(crc32Hash.Sum(nil)) + + // 构建请求路径 + uploadNode := config.InnerUploadAddress.UploadNodes[0] + storeInfo := uploadNode.StoreInfos[0] + uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI) + + uploadResp := UploadResp{} + + if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) { + req.SetHeaders(map[string]string{ + "Content-Type": "application/octet-stream", + "Content-Crc32": crc32Value, + "Content-Length": fmt.Sprintf("%d", len(data)), + "Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)), + }) + + req.SetBody(data) + }, &uploadResp); err != nil { + return nil, err + } + + if uploadResp.Code != 2000 { + return nil, fmt.Errorf("upload failed: %s", uploadResp.Message) + } + + uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType) + if err != nil { + return nil, err + } + + return &model.Object{ + ID: uploadNodeResp.Data.NodeList[0].ID, + Name: uploadNodeResp.Data.NodeList[0].Name, + Size: file.GetSize(), + IsFolder: false, + }, nil +} + +// UploadByMultipart 分片上传 +func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fileSize int64, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) { + // 构建请求路径 + uploadNode := config.InnerUploadAddress.UploadNodes[0] + storeInfo := uploadNode.StoreInfos[0] + uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI) + // 初始化分片上传 + var uploadID string + err := 
d._retryOperation("Initialize multipart upload", func() error { + var err error + uploadID, err = d.initMultipartUpload(config, uploadUrl, storeInfo) + return err + }) + if err != nil { + return nil, fmt.Errorf("failed to initialize multipart upload: %w", err) + } + // 准备分片参数 + chunkSize := DefaultChunkSize + if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 { + chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize) + } + totalParts := (fileSize + chunkSize - 1) / chunkSize + // 创建分片信息组 + parts := make([]UploadPart, totalParts) + // 缓存文件 + tempFile, err := file.CacheFullInTempFile() + if err != nil { + return nil, fmt.Errorf("failed to cache file: %w", err) + } + defer tempFile.Close() + up(10.0) // 更新进度 + // 设置并行上传 + threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread, + retry.Attempts(1), + retry.Delay(time.Second), + retry.DelayType(retry.BackOffDelay)) + + var partsMutex sync.Mutex + // 并行上传所有分片 + for partIndex := int64(0); partIndex < totalParts; partIndex++ { + if utils.IsCanceled(uploadCtx) { + break + } + partIndex := partIndex + partNumber := partIndex + 1 // 分片编号从1开始 + + threadG.Go(func(ctx context.Context) error { + // 计算此分片的大小和偏移 + offset := partIndex * chunkSize + size := chunkSize + if partIndex == totalParts-1 { + size = fileSize - offset + } + + limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size)) + // 读取数据到内存 + data, err := io.ReadAll(limitedReader) + if err != nil { + return fmt.Errorf("failed to read part %d: %w", partNumber, err) + } + // 计算CRC32 + crc32Value := calculateCRC32(data) + // 使用_retryOperation上传分片 + var uploadPart UploadPart + if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error { + var err error + uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value) + return err + }); err != nil { + return fmt.Errorf("part %d upload failed: %w", partNumber, err) + } + // 记录成功上传的分片 + partsMutex.Lock() 
+ parts[partIndex] = UploadPart{ + PartNumber: strconv.FormatInt(partNumber, 10), + Etag: uploadPart.Etag, + Crc32: crc32Value, + } + partsMutex.Unlock() + // 更新进度 + progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts) + up(math.Min(progress, 95.0)) + + return nil + }) + } + + if err = threadG.Wait(); err != nil { + return nil, err + } + // 完成上传-分片合并 + if err = d._retryOperation("Complete multipart upload", func() error { + return d.completeMultipartUpload(config, uploadUrl, uploadID, parts) + }); err != nil { + return nil, fmt.Errorf("failed to complete multipart upload: %w", err) + } + // 提交上传 + if err = d._retryOperation("Commit upload", func() error { + return d.commitMultipartUpload(config) + }); err != nil { + return nil, fmt.Errorf("failed to commit upload: %w", err) + } + + up(98.0) // 更新到98% + // 上传节点信息 + var uploadNodeResp UploadNodeResp + + if err = d._retryOperation("Upload node", func() error { + var err error + uploadNodeResp, err = d.uploadNode(config, dstDir, file, dataType) + return err + }); err != nil { + return nil, fmt.Errorf("failed to upload node: %w", err) + } + + up(100.0) // 完成上传 + + return &model.Object{ + ID: uploadNodeResp.Data.NodeList[0].ID, + Name: uploadNodeResp.Data.NodeList[0].Name, + Size: file.GetSize(), + IsFolder: false, + }, nil +} + +// 统一上传请求方法 +func (d *Doubao) uploadRequest(uploadUrl string, method string, storeInfo StoreInfo, callback base.ReqCallback, resp interface{}) ([]byte, error) { + client := resty.New() + client.SetTransport(&http.Transport{ + DisableKeepAlives: true, // 禁用连接复用 + ForceAttemptHTTP2: false, // 强制使用HTTP/1.1 + }) + client.SetTimeout(UploadTimeout) + + req := client.R() + req.SetHeaders(map[string]string{ + "Host": strings.Split(uploadUrl, "/")[2], + "Referer": BaseURL + "/", + "Origin": BaseURL, + "User-Agent": UserAgent, + "X-Storage-U": d.UserId, + "Authorization": storeInfo.Auth, + }) + + if method == http.MethodPost { + req.SetHeader("Content-Type", "text/plain;charset=UTF-8") 
+ } + + if callback != nil { + callback(req) + } + + if resp != nil { + req.SetResult(resp) + } + + res, err := req.Execute(method, uploadUrl) + if err != nil && err != io.EOF { + return nil, fmt.Errorf("upload request failed: %w", err) + } + + return res.Body(), nil +} + +// 初始化分片上传 +func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, storeInfo StoreInfo) (uploadId string, err error) { + uploadResp := UploadResp{} + + _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) { + req.SetQueryParams(map[string]string{ + "uploadmode": "part", + "phase": "init", + }) + }, &uploadResp) + + if err != nil { + return uploadId, err + } + + if uploadResp.Code != 2000 { + return uploadId, fmt.Errorf("init upload failed: %s", uploadResp.Message) + } + + return uploadResp.Data.UploadId, nil +} + +// 分片上传实现 +func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) { + uploadResp := UploadResp{} + storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0] + + _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) { + req.SetHeaders(map[string]string{ + "Content-Type": "application/octet-stream", + "Content-Crc32": crc32Value, + "Content-Length": fmt.Sprintf("%d", len(data)), + "Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)), + }) + + req.SetQueryParams(map[string]string{ + "uploadid": uploadID, + "part_number": strconv.FormatInt(partNumber, 10), + "phase": "transfer", + }) + + req.SetBody(data) + req.SetContentLength(true) + }, &uploadResp) + + if err != nil { + return resp, err + } + + if uploadResp.Code != 2000 { + return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message) + } else if uploadResp.Data.Crc32 != crc32Value { + return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, 
uploadResp.Data.Crc32) + } + + return uploadResp.Data, nil +} + +// 完成分片上传 +func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error { + uploadResp := UploadResp{} + + storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0] + + body := _convertUploadParts(parts) + + err := utils.Retry(MaxRetryAttempts, time.Second, func() (err error) { + _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) { + req.SetQueryParams(map[string]string{ + "uploadid": uploadID, + "phase": "finish", + "uploadmode": "part", + }) + req.SetBody(body) + }, &uploadResp) + + if err != nil { + return err + } + // 检查响应状态码 2000 成功 4024 分片合并中 + if uploadResp.Code != 2000 && uploadResp.Code != 4024 { + return fmt.Errorf("finish upload failed: %s", uploadResp.Message) + } + + return err + }) + + if err != nil { + return fmt.Errorf("failed to complete multipart upload: %w", err) + } + + return nil +} + +func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error { + uploadUrl := d.UploadToken.Samantha.UploadInfo.VideoHost + params := map[string]string{ + "Action": "CommitUploadInner", + "Version": "2020-11-19", + "SpaceName": d.UploadToken.Samantha.UploadInfo.SpaceName, + } + tokenType := VideoDataType + + videoCommitUploadResp := VideoCommitUploadResp{} + + jsonBytes, err := json.Marshal(base.Json{ + "SessionKey": uploadConfig.InnerUploadAddress.UploadNodes[0].SessionKey, + "Functions": []base.Json{}, + }) + if err != nil { + return fmt.Errorf("failed to marshal request data: %w", err) + } + + _, err = d.requestApi(uploadUrl, http.MethodPost, tokenType, func(req *resty.Request) { + req.SetHeader("Content-Type", "application/json") + req.SetQueryParams(params) + req.SetBody(jsonBytes) + + }, &videoCommitUploadResp) + if err != nil { + return err + } + + return nil +} + +// 计算CRC32 +func calculateCRC32(data []byte) string { + hash := crc32.NewIEEE() + hash.Write(data) + return 
hex.EncodeToString(hash.Sum(nil)) +} + +// _retryOperation 操作重试 +func (d *Doubao) _retryOperation(operation string, fn func() error) error { + return retry.Do( + fn, + retry.Attempts(MaxRetryAttempts), + retry.Delay(500*time.Millisecond), + retry.DelayType(retry.BackOffDelay), + retry.MaxJitter(200*time.Millisecond), + retry.OnRetry(func(n uint, err error) { + log.Debugf("[doubao] %s retry #%d: %v", operation, n+1, err) + }), + ) +} + +// _convertUploadParts 将分片信息转换为字符串 +func _convertUploadParts(parts []UploadPart) string { + if len(parts) == 0 { + return "" + } + + var result strings.Builder + + for i, part := range parts { + if i > 0 { + result.WriteString(",") + } + result.WriteString(fmt.Sprintf("%s:%s", part.PartNumber, part.Crc32)) + } + + return result.String() +} + +// 获取规范查询字符串 +func getCanonicalQueryString(query url.Values) string { + if len(query) == 0 { + return "" + } + + keys := make([]string, 0, len(query)) + for k := range query { + keys = append(keys, k) + } + sort.Strings(keys) + + parts := make([]string, 0, len(keys)) + for _, k := range keys { + values := query[k] + for _, v := range values { + parts = append(parts, urlEncode(k)+"="+urlEncode(v)) + } + } + + return strings.Join(parts, "&") +} + +func urlEncode(s string) string { + s = url.QueryEscape(s) + s = strings.ReplaceAll(s, "+", "%20") + return s +} + +// 获取规范头信息和已签名头列表 +func getCanonicalHeadersFromMap(headers map[string][]string) (string, string) { + // 不可签名的头部列表 + unsignableHeaders := map[string]bool{ + "authorization": true, + "content-type": true, + "content-length": true, + "user-agent": true, + "presigned-expires": true, + "expect": true, + "x-amzn-trace-id": true, + } + headerValues := make(map[string]string) + var signedHeadersList []string + + for k, v := range headers { + if len(v) == 0 { + continue + } + + lowerKey := strings.ToLower(k) + // 检查是否可签名 + if strings.HasPrefix(lowerKey, "x-amz-") || !unsignableHeaders[lowerKey] { + value := strings.TrimSpace(v[0]) + value = 
strings.Join(strings.Fields(value), " ") + headerValues[lowerKey] = value + signedHeadersList = append(signedHeadersList, lowerKey) + } + } + + sort.Strings(signedHeadersList) + + var canonicalHeadersStr strings.Builder + for _, key := range signedHeadersList { + canonicalHeadersStr.WriteString(key) + canonicalHeadersStr.WriteString(":") + canonicalHeadersStr.WriteString(headerValues[key]) + canonicalHeadersStr.WriteString("\n") + } + + signedHeaders := strings.Join(signedHeadersList, ";") + + return canonicalHeadersStr.String(), signedHeaders +} + +// 计算HMAC-SHA256 +func hmacSHA256(key []byte, data string) []byte { + h := hmac.New(sha256.New, key) + h.Write([]byte(data)) + return h.Sum(nil) +} + +// 计算HMAC-SHA256并返回十六进制字符串 +func hmacSHA256Hex(key []byte, data string) string { + return hex.EncodeToString(hmacSHA256(key, data)) +} + +// 计算SHA256哈希并返回十六进制字符串 +func hashSHA256(data string) string { + h := sha256.New() + h.Write([]byte(data)) + return hex.EncodeToString(h.Sum(nil)) +} + +// 获取签名密钥 +func getSigningKey(secretKey, dateStamp, region, service string) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), dateStamp) + kRegion := hmacSHA256(kDate, region) + kService := hmacSHA256(kRegion, service) + kSigning := hmacSHA256(kService, "aws4_request") + return kSigning +} + +// generateContentDisposition 生成符合RFC 5987标准的Content-Disposition头部 +func generateContentDisposition(filename string) string { + // 按照RFC 2047进行编码,用于filename部分 + encodedName := urlEncode(filename) + + // 按照RFC 5987进行编码,用于filename*部分 + encodedNameRFC5987 := encodeRFC5987(filename) + + return fmt.Sprintf("attachment; filename=\"%s\"; filename*=utf-8''%s", + encodedName, encodedNameRFC5987) +} + +// encodeRFC5987 按照RFC 5987规范编码字符串,适用于HTTP头部参数中的非ASCII字符 +func encodeRFC5987(s string) string { + var buf strings.Builder + for _, r := range []byte(s) { + // 根据RFC 5987,只有字母、数字和部分特殊符号可以不编码 + if (r >= 'a' && r <= 'z') || + (r >= 'A' && r <= 'Z') || + (r >= '0' && r <= '9') || + r == '-' || r == '.' 
|| r == '_' || r == '~' { + buf.WriteByte(r) + } else { + // 其他字符都需要百分号编码 + fmt.Fprintf(&buf, "%%%02X", r) + } + } + return buf.String() +} + +func randomString() string { + const charset = "0123456789abcdefghijklmnopqrstuvwxyz" + const length = 11 // 11位随机字符串 + + var sb strings.Builder + sb.Grow(length) + + for i := 0; i < length; i++ { + sb.WriteByte(charset[rand.Intn(len(charset))]) + } + + return sb.String() +} From 88abb323cb8e596e8053ce57f890dcc7286fe012 Mon Sep 17 00:00:00 2001 From: Lee CQ <47050568+lee-cq@users.noreply.github.com> Date: Sat, 12 Apr 2025 17:27:56 +0800 Subject: [PATCH 170/187] feat(url-tree): implement the Put interface to support adding links directly to the UrlTree on the web side (#8312) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(url-tree)支持PUT * feat(url-tree) UrlTree更新时,需要将路径和内容分割 #8303 * fix: stdpath.Join call Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: Andy Hsu Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- drivers/url_tree/driver.go | 20 +++++++++++++++++++- internal/op/fs.go | 7 +++++++ internal/op/path.go | 28 ++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 1 deletion(-) diff --git a/drivers/url_tree/driver.go b/drivers/url_tree/driver.go index f97d5cc5..049bd2db 100644 --- a/drivers/url_tree/driver.go +++ b/drivers/url_tree/driver.go @@ -243,7 +243,25 @@ func (d *Urls) PutURL(ctx context.Context, dstDir model.Obj, name, url string) ( } func (d *Urls) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - return errs.UploadNotSupported + if !d.Writable { + return errs.PermissionDenied + } + d.mutex.Lock() + defer d.mutex.Unlock() + node := GetNodeFromRootByPath(d.root, dstDir.GetPath()) // parent + if node == nil { + return errs.ObjectNotFound + } + if node.isFile() { + return errs.NotFolder + } + file, err := 
parseFileLine(stream.GetName(), d.HeadSize) + if err != nil { + return err + } + node.Children = append(node.Children, file) + d.updateStorage() + return nil } func (d *Urls) updateStorage() { diff --git a/internal/op/fs.go b/internal/op/fs.go index 99c2fe34..64e99335 100644 --- a/internal/op/fs.go +++ b/internal/op/fs.go @@ -10,6 +10,7 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/generic_sync" "github.com/alist-org/alist/v3/pkg/singleflight" "github.com/alist-org/alist/v3/pkg/utils" @@ -517,6 +518,12 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod log.Errorf("failed to close file streamer, %v", err) } }() + // UrlTree PUT + if storage.GetStorage().Driver == "UrlTree" { + var link string + dstDirPath, link = urlTreeSplitLineFormPath(stdpath.Join(dstDirPath, file.GetName())) + file = &stream.FileStream{Obj: &model.Object{Name: link}} + } // if file exist and size = 0, delete it dstDirPath = utils.FixAndCleanPath(dstDirPath) dstPath := stdpath.Join(dstDirPath, file.GetName()) diff --git a/internal/op/path.go b/internal/op/path.go index 27f7e183..912a0000 100644 --- a/internal/op/path.go +++ b/internal/op/path.go @@ -2,6 +2,7 @@ package op import ( "github.com/alist-org/alist/v3/internal/errs" + stdpath "path" "strings" "github.com/alist-org/alist/v3/internal/driver" @@ -27,3 +28,30 @@ func GetStorageAndActualPath(rawPath string) (storage driver.Driver, actualPath actualPath = utils.FixAndCleanPath(strings.TrimPrefix(rawPath, mountPath)) return } + +// urlTreeSplitLineFormPath 分割path中分割真实路径和UrlTree定义字符串 +func urlTreeSplitLineFormPath(path string) (pp string, file string) { + // url.PathUnescape 会移除 // ,手动加回去 + path = strings.Replace(path, "https:/", "https://", 1) + path = strings.Replace(path, "http:/", "http://", 1) + if 
strings.Contains(path, ":https:/") || strings.Contains(path, ":http:/") { + // URL-Tree模式 /url_tree_drivr/file_name[:size[:time]]:https://example.com/file + fPath := strings.SplitN(path, ":", 2)[0] + pp, _ = stdpath.Split(fPath) + file = path[len(pp):] + } else if strings.Contains(path, "/https:/") || strings.Contains(path, "/http:/") { + // URL-Tree模式 /url_tree_drivr/https://example.com/file + index := strings.Index(path, "/http://") + if index == -1 { + index = strings.Index(path, "/https://") + } + pp = path[:index] + file = path[index+1:] + } else { + pp, file = stdpath.Split(path) + } + if pp == "" { + pp = "/" + } + return +} From 0a9921fa7948ba51c5f1500cdf7e75bb4658afab Mon Sep 17 00:00:00 2001 From: Yifan Gao Date: Sat, 19 Apr 2025 14:22:12 +0800 Subject: [PATCH 171/187] fix(aliyundrive_open): resolve file duplication issues and improve path handling (#8358) * fix(aliyundrive_open): resolve file duplication issues and improve path handling 1. Fix file duplication by implementing a new removeDuplicateFiles method that cleans up duplicate files after operations 2. Change Move operation to use "ignore" for check_name_mode instead of "refuse" to allow moves when destination has same filename 3. Set Copy operation to handle duplicates by removing them after successful copy 4. Improve path handling for all file operations (Move, Rename, Put, MakeDir) by properly maintaining the full path of objects 5. Implement GetRoot interface for proper root object initialization with correct path 6. Add proper path management in List operation to ensure objects have correct paths 7. Fix path handling in error cases and improve logging of failures * refactor(aliyundrive_open): change error logging to warnings for duplicate file removal Updated the Move, Rename, and Copy methods to log warnings instead of errors when duplicate file removal fails, as the primary operations have already completed successfully. This improves the clarity of logs without affecting the functionality. 
* Update drivers/aliyundrive_open/util.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- drivers/aliyundrive_open/driver.go | 99 ++++++++++++++++++++++++++---- drivers/aliyundrive_open/util.go | 34 ++++++++++ 2 files changed, 121 insertions(+), 12 deletions(-) diff --git a/drivers/aliyundrive_open/driver.go b/drivers/aliyundrive_open/driver.go index a65ba05c..394eadb1 100644 --- a/drivers/aliyundrive_open/driver.go +++ b/drivers/aliyundrive_open/driver.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net/http" + "path/filepath" "time" "github.com/Xhofe/rateg" @@ -14,6 +15,7 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" + log "github.com/sirupsen/logrus" ) type AliyundriveOpen struct { @@ -72,6 +74,18 @@ func (d *AliyundriveOpen) Drop(ctx context.Context) error { return nil } +// GetRoot implements the driver.GetRooter interface to properly set up the root object +func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) { + return &model.Object{ + ID: d.RootFolderID, + Path: "/", + Name: "root", + Size: 0, + Modified: d.Modified, + IsFolder: true, + }, nil +} + func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { if d.limitList == nil { return nil, fmt.Errorf("driver not init") @@ -80,9 +94,17 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li if err != nil { return nil, err } - return utils.SliceConvert(files, func(src File) (model.Obj, error) { - return fileToObj(src), nil + + objs, err := utils.SliceConvert(files, func(src File) (model.Obj, error) { + obj := fileToObj(src) + // Set the correct path for the object + if dir.GetPath() != "" { + obj.Path = filepath.Join(dir.GetPath(), obj.GetName()) + } + return obj, nil }) + + return objs, err } func (d *AliyundriveOpen) 
link(ctx context.Context, file model.Obj) (*model.Link, error) { @@ -132,7 +154,16 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN if err != nil { return nil, err } - return fileToObj(newDir), nil + obj := fileToObj(newDir) + + // Set the correct Path for the returned directory object + if parentDir.GetPath() != "" { + obj.Path = filepath.Join(parentDir.GetPath(), dirName) + } else { + obj.Path = "/" + dirName + } + + return obj, nil } func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { @@ -142,20 +173,24 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m "drive_id": d.DriveId, "file_id": srcObj.GetID(), "to_parent_file_id": dstDir.GetID(), - "check_name_mode": "refuse", // optional:ignore,auto_rename,refuse + "check_name_mode": "ignore", // optional:ignore,auto_rename,refuse //"new_name": "newName", // The new name to use when a file of the same name exists }).SetResult(&resp) }) if err != nil { return nil, err } - if resp.Exist { - return nil, errors.New("existence of files with the same name") - } if srcObj, ok := srcObj.(*model.ObjThumb); ok { srcObj.ID = resp.FileID srcObj.Modified = time.Now() + srcObj.Path = filepath.Join(dstDir.GetPath(), srcObj.GetName()) + + // Check for duplicate files in the destination directory + if err := d.removeDuplicateFiles(ctx, dstDir.GetPath(), srcObj.GetName(), srcObj.GetID()); err != nil { + // Only log a warning instead of returning an error since the move operation has already completed successfully + log.Warnf("Failed to remove duplicate files after move: %v", err) + } return srcObj, nil } return nil, nil @@ -173,19 +208,47 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName if err != nil { return nil, err } - return fileToObj(newFile), nil + + // Check for duplicate files in the parent directory + parentPath := filepath.Dir(srcObj.GetPath()) + if err := 
d.removeDuplicateFiles(ctx, parentPath, newName, newFile.FileId); err != nil { + // Only log a warning instead of returning an error since the rename operation has already completed successfully + log.Warnf("Failed to remove duplicate files after rename: %v", err) + } + + obj := fileToObj(newFile) + + // Set the correct Path for the renamed object + if parentPath != "" && parentPath != "." { + obj.Path = filepath.Join(parentPath, newName) + } else { + obj.Path = "/" + newName + } + + return obj, nil } func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + var resp MoveOrCopyResp _, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "drive_id": d.DriveId, "file_id": srcObj.GetID(), "to_parent_file_id": dstDir.GetID(), - "auto_rename": true, - }) + "auto_rename": false, + }).SetResult(&resp) }) - return err + if err != nil { + return err + } + + // Check for duplicate files in the destination directory + if err := d.removeDuplicateFiles(ctx, dstDir.GetPath(), srcObj.GetName(), resp.FileID); err != nil { + // Only log a warning instead of returning an error since the copy operation has already completed successfully + log.Warnf("Failed to remove duplicate files after copy: %v", err) + } + + return nil } func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error { @@ -203,7 +266,18 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error { } func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { - return d.upload(ctx, dstDir, stream, up) + obj, err := d.upload(ctx, dstDir, stream, up) + + // Set the correct Path for the returned file object + if obj != nil && obj.GetPath() == "" { + if dstDir.GetPath() != "" { + if objWithPath, ok := obj.(model.SetPath); ok { + objWithPath.SetPath(filepath.Join(dstDir.GetPath(), obj.GetName())) + } + } + } + + return 
obj, err } func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { @@ -235,3 +309,4 @@ var _ driver.MkdirResult = (*AliyundriveOpen)(nil) var _ driver.MoveResult = (*AliyundriveOpen)(nil) var _ driver.RenameResult = (*AliyundriveOpen)(nil) var _ driver.PutResult = (*AliyundriveOpen)(nil) +var _ driver.GetRooter = (*AliyundriveOpen)(nil) diff --git a/drivers/aliyundrive_open/util.go b/drivers/aliyundrive_open/util.go index 659d7da7..c3cda10a 100644 --- a/drivers/aliyundrive_open/util.go +++ b/drivers/aliyundrive_open/util.go @@ -10,6 +10,7 @@ import ( "time" "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" @@ -186,3 +187,36 @@ func (d *AliyundriveOpen) getAccessToken() string { } return d.AccessToken } + +// Remove duplicate files with the same name in the given directory path, +// preserving the file with the given skipID if provided +func (d *AliyundriveOpen) removeDuplicateFiles(ctx context.Context, parentPath string, fileName string, skipID string) error { + // Handle empty path (root directory) case + if parentPath == "" { + parentPath = "/" + } + + // List all files in the parent directory + files, err := op.List(ctx, d, parentPath, model.ListArgs{}) + if err != nil { + return err + } + + // Find all files with the same name + var duplicates []model.Obj + for _, file := range files { + if file.GetName() == fileName && file.GetID() != skipID { + duplicates = append(duplicates, file) + } + } + + // Remove all duplicates files, except the file with the given ID + for _, file := range duplicates { + err := d.Remove(ctx, file) + if err != nil { + return err + } + } + + return nil +} From 477c43971f44a591b14506be3fc36f4acc7d1f9a Mon Sep 17 00:00:00 2001 From: asdfghjkl <61342682+anobodys@users.noreply.github.com> Date: Sat, 19 Apr 2025 14:22:43 +0800 Subject: [PATCH 
172/187] feat(doubao_share): support doubao_share link (#8376) Co-authored-by: anobodys --- drivers/all.go | 1 + drivers/doubao_share/driver.go | 177 ++++++++ drivers/doubao_share/meta.go | 32 ++ drivers/doubao_share/types.go | 207 +++++++++ drivers/doubao_share/util.go | 744 +++++++++++++++++++++++++++++++++ 5 files changed, 1161 insertions(+) create mode 100644 drivers/doubao_share/driver.go create mode 100644 drivers/doubao_share/meta.go create mode 100644 drivers/doubao_share/types.go create mode 100644 drivers/doubao_share/util.go diff --git a/drivers/all.go b/drivers/all.go index 083d01dc..0b8ce3aa 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -24,6 +24,7 @@ import ( _ "github.com/alist-org/alist/v3/drivers/cloudreve" _ "github.com/alist-org/alist/v3/drivers/crypt" _ "github.com/alist-org/alist/v3/drivers/doubao" + _ "github.com/alist-org/alist/v3/drivers/doubao_share" _ "github.com/alist-org/alist/v3/drivers/dropbox" _ "github.com/alist-org/alist/v3/drivers/febbox" _ "github.com/alist-org/alist/v3/drivers/ftp" diff --git a/drivers/doubao_share/driver.go b/drivers/doubao_share/driver.go new file mode 100644 index 00000000..61076d1e --- /dev/null +++ b/drivers/doubao_share/driver.go @@ -0,0 +1,177 @@ +package doubao_share + +import ( + "context" + "errors" + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/go-resty/resty/v2" + "net/http" +) + +type DoubaoShare struct { + model.Storage + Addition + RootFiles []RootFileList +} + +func (d *DoubaoShare) Config() driver.Config { + return config +} + +func (d *DoubaoShare) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *DoubaoShare) Init(ctx context.Context) error { + // 初始化 虚拟分享列表 + if err := d.initShareList(); err != nil { + return err + } + + return nil +} + +func (d *DoubaoShare) Drop(ctx context.Context) error { + return nil +} + 
+func (d *DoubaoShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + // 检查是否为根目录 + if dir.GetID() == "" && dir.GetPath() == "/" { + return d.listRootDirectory(ctx) + } + + // 非根目录,处理不同情况 + if fo, ok := dir.(*FileObject); ok { + if fo.ShareID == "" { + // 虚拟目录,需要列出子目录 + return d.listVirtualDirectoryContent(dir) + } else { + // 具有分享ID的目录,获取此分享下的文件 + shareId, relativePath, err := d._findShareAndPath(dir) + if err != nil { + return nil, err + } + return d.getFilesInPath(ctx, shareId, dir.GetID(), relativePath) + } + } + + // 使用通用方法 + shareId, relativePath, err := d._findShareAndPath(dir) + if err != nil { + return nil, err + } + + // 获取指定路径下的文件 + return d.getFilesInPath(ctx, shareId, dir.GetID(), relativePath) +} + +func (d *DoubaoShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + var downloadUrl string + + if u, ok := file.(*FileObject); ok { + switch u.NodeType { + case VideoType, AudioType: + var r GetVideoFileUrlResp + _, err := d.request("/samantha/media/get_play_info", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "key": u.Key, + "share_id": u.ShareID, + "node_id": file.GetID(), + }) + }, &r) + if err != nil { + return nil, err + } + + downloadUrl = r.Data.OriginalMediaInfo.MainURL + default: + var r GetFileUrlResp + _, err := d.request("/alice/message/get_file_url", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{u.Key}, + "type": FileNodeType[u.NodeType], + }) + }, &r) + if err != nil { + return nil, err + } + + downloadUrl = r.Data.FileUrls[0].MainURL + } + + // 生成标准的Content-Disposition + contentDisposition := generateContentDisposition(u.Name) + + return &model.Link{ + URL: downloadUrl, + Header: http.Header{ + "User-Agent": []string{UserAgent}, + "Content-Disposition": []string{contentDisposition}, + }, + }, nil + } + + return nil, errors.New("can't convert obj to URL") +} + +func (d *DoubaoShare) MakeDir(ctx 
context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + // TODO create folder, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + // TODO move obj, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + // TODO rename obj, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + // TODO copy obj, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) Remove(ctx context.Context, obj model.Obj) error { + // TODO remove obj, optional + return errs.NotImplement +} + +func (d *DoubaoShare) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + // TODO upload file, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { + // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) { + // TODO extract args.InnerPath 
path in the archive srcObj to the dstDir location, optional + // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir + // return errs.NotImplement to use an internal archive tool + return nil, errs.NotImplement +} + +//func (d *DoubaoShare) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { +// return nil, errs.NotSupport +//} + +var _ driver.Driver = (*DoubaoShare)(nil) diff --git a/drivers/doubao_share/meta.go b/drivers/doubao_share/meta.go new file mode 100644 index 00000000..a749eefb --- /dev/null +++ b/drivers/doubao_share/meta.go @@ -0,0 +1,32 @@ +package doubao_share + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + driver.RootPath + Cookie string `json:"cookie" type:"text"` + ShareIds string `json:"share_ids" type:"text" required:"true"` +} + +var config = driver.Config{ + Name: "DoubaoShare", + LocalSort: true, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: true, + NeedMs: false, + DefaultRoot: "/", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &DoubaoShare{} + }) +} diff --git a/drivers/doubao_share/types.go b/drivers/doubao_share/types.go new file mode 100644 index 00000000..46f226fa --- /dev/null +++ b/drivers/doubao_share/types.go @@ -0,0 +1,207 @@ +package doubao_share + +import ( + "encoding/json" + "fmt" + "github.com/alist-org/alist/v3/internal/model" +) + +type BaseResp struct { + Code int `json:"code"` + Msg string `json:"msg"` +} + +type NodeInfoData struct { + Share ShareInfo `json:"share,omitempty"` + Creator CreatorInfo `json:"creator,omitempty"` + NodeList []File `json:"node_list,omitempty"` + NodeInfo File `json:"node_info,omitempty"` + Children []File `json:"children,omitempty"` + Path FilePath `json:"path,omitempty"` + NextCursor string 
`json:"next_cursor,omitempty"` + HasMore bool `json:"has_more,omitempty"` +} + +type NodeInfoResp struct { + BaseResp + NodeInfoData `json:"data"` +} + +type RootFileList struct { + ShareID string + VirtualPath string + NodeInfo NodeInfoData + Child *[]RootFileList +} + +type File struct { + ID string `json:"id"` + Name string `json:"name"` + Key string `json:"key"` + NodeType int `json:"node_type"` + Size int64 `json:"size"` + Source int `json:"source"` + NameReviewStatus int `json:"name_review_status"` + ContentReviewStatus int `json:"content_review_status"` + RiskReviewStatus int `json:"risk_review_status"` + ConversationID string `json:"conversation_id"` + ParentID string `json:"parent_id"` + CreateTime int64 `json:"create_time"` + UpdateTime int64 `json:"update_time"` +} + +type FileObject struct { + model.Object + ShareID string + Key string + NodeID string + NodeType int +} + +type ShareInfo struct { + ShareID string `json:"share_id"` + FirstNode struct { + ID string `json:"id"` + Name string `json:"name"` + Key string `json:"key"` + NodeType int `json:"node_type"` + Size int `json:"size"` + Source int `json:"source"` + Content struct { + LinkFileType string `json:"link_file_type"` + ImageWidth int `json:"image_width"` + ImageHeight int `json:"image_height"` + AiSkillStatus int `json:"ai_skill_status"` + } `json:"content"` + NameReviewStatus int `json:"name_review_status"` + ContentReviewStatus int `json:"content_review_status"` + RiskReviewStatus int `json:"risk_review_status"` + ConversationID string `json:"conversation_id"` + ParentID string `json:"parent_id"` + CreateTime int `json:"create_time"` + UpdateTime int `json:"update_time"` + } `json:"first_node"` + NodeCount int `json:"node_count"` + CreateTime int `json:"create_time"` + Channel string `json:"channel"` + InfluencerType int `json:"influencer_type"` +} + +type CreatorInfo struct { + EntityID string `json:"entity_id"` + UserName string `json:"user_name"` + NickName string `json:"nick_name"` + 
Avatar struct { + OriginURL string `json:"origin_url"` + TinyURL string `json:"tiny_url"` + URI string `json:"uri"` + } `json:"avatar"` +} + +type FilePath []struct { + ID string `json:"id"` + Name string `json:"name"` + Key string `json:"key"` + NodeType int `json:"node_type"` + Size int `json:"size"` + Source int `json:"source"` + NameReviewStatus int `json:"name_review_status"` + ContentReviewStatus int `json:"content_review_status"` + RiskReviewStatus int `json:"risk_review_status"` + ConversationID string `json:"conversation_id"` + ParentID string `json:"parent_id"` + CreateTime int `json:"create_time"` + UpdateTime int `json:"update_time"` +} + +type GetFileUrlResp struct { + BaseResp + Data struct { + FileUrls []struct { + URI string `json:"uri"` + MainURL string `json:"main_url"` + BackURL string `json:"back_url"` + } `json:"file_urls"` + } `json:"data"` +} + +type GetVideoFileUrlResp struct { + BaseResp + Data struct { + MediaType string `json:"media_type"` + MediaInfo []struct { + Meta struct { + Height string `json:"height"` + Width string `json:"width"` + Format string `json:"format"` + Duration float64 `json:"duration"` + CodecType string `json:"codec_type"` + Definition string `json:"definition"` + } `json:"meta"` + MainURL string `json:"main_url"` + BackupURL string `json:"backup_url"` + } `json:"media_info"` + OriginalMediaInfo struct { + Meta struct { + Height string `json:"height"` + Width string `json:"width"` + Format string `json:"format"` + Duration float64 `json:"duration"` + CodecType string `json:"codec_type"` + Definition string `json:"definition"` + } `json:"meta"` + MainURL string `json:"main_url"` + BackupURL string `json:"backup_url"` + } `json:"original_media_info"` + PosterURL string `json:"poster_url"` + PlayableStatus int `json:"playable_status"` + } `json:"data"` +} + +type CommonResp struct { + Code int `json:"code"` + Msg string `json:"msg,omitempty"` + Message string `json:"message,omitempty"` // 错误情况下的消息 + Data json.RawMessage 
`json:"data,omitempty"` // 原始数据,稍后解析 + Error *struct { + Code int `json:"code"` + Message string `json:"message"` + Locale string `json:"locale"` + } `json:"error,omitempty"` +} + +// IsSuccess 判断响应是否成功 +func (r *CommonResp) IsSuccess() bool { + return r.Code == 0 +} + +// GetError 获取错误信息 +func (r *CommonResp) GetError() error { + if r.IsSuccess() { + return nil + } + // 优先使用message字段 + errMsg := r.Message + if errMsg == "" { + errMsg = r.Msg + } + // 如果error对象存在且有详细消息,则使用error中的信息 + if r.Error != nil && r.Error.Message != "" { + errMsg = r.Error.Message + } + + return fmt.Errorf("[doubao] API error (code: %d): %s", r.Code, errMsg) +} + +// UnmarshalData 将data字段解析为指定类型 +func (r *CommonResp) UnmarshalData(v interface{}) error { + if !r.IsSuccess() { + return r.GetError() + } + + if len(r.Data) == 0 { + return nil + } + + return json.Unmarshal(r.Data, v) +} diff --git a/drivers/doubao_share/util.go b/drivers/doubao_share/util.go new file mode 100644 index 00000000..e0fc526e --- /dev/null +++ b/drivers/doubao_share/util.go @@ -0,0 +1,744 @@ +package doubao_share + +import ( + "context" + "encoding/json" + "fmt" + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/model" + "github.com/go-resty/resty/v2" + log "github.com/sirupsen/logrus" + "net/http" + "net/url" + "path" + "regexp" + "strings" + "time" +) + +const ( + DirectoryType = 1 + FileType = 2 + LinkType = 3 + ImageType = 4 + PagesType = 5 + VideoType = 6 + AudioType = 7 + MeetingMinutesType = 8 +) + +var FileNodeType = map[int]string{ + 1: "directory", + 2: "file", + 3: "link", + 4: "image", + 5: "pages", + 6: "video", + 7: "audio", + 8: "meeting_minutes", +} + +const ( + BaseURL = "https://www.doubao.com" + FileDataType = "file" + ImgDataType = "image" + VideoDataType = "video" + UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36" +) + +func (d *DoubaoShare) request(path string, method string, callback 
base.ReqCallback, resp interface{}) ([]byte, error) { + reqUrl := BaseURL + path + req := base.RestyClient.R() + + req.SetHeaders(map[string]string{ + "Cookie": d.Cookie, + "User-Agent": UserAgent, + }) + + req.SetQueryParams(map[string]string{ + "version_code": "20800", + "device_platform": "web", + }) + + if callback != nil { + callback(req) + } + + var commonResp CommonResp + + res, err := req.Execute(method, reqUrl) + log.Debugln(res.String()) + if err != nil { + return nil, err + } + + body := res.Body() + // 先解析为通用响应 + if err = json.Unmarshal(body, &commonResp); err != nil { + return nil, err + } + // 检查响应是否成功 + if !commonResp.IsSuccess() { + return body, commonResp.GetError() + } + + if resp != nil { + if err = json.Unmarshal(body, resp); err != nil { + return body, err + } + } + + return body, nil +} + +func (d *DoubaoShare) getFiles(dirId, nodeId, cursor string) (resp []File, err error) { + var r NodeInfoResp + + var body = base.Json{ + "share_id": dirId, + "node_id": nodeId, + } + // 如果有游标,则设置游标和大小 + if cursor != "" { + body["cursor"] = cursor + body["size"] = 50 + } else { + body["need_full_path"] = false + } + + _, err = d.request("/samantha/aispace/share/node_info", http.MethodPost, func(req *resty.Request) { + req.SetBody(body) + }, &r) + if err != nil { + return nil, err + } + + if r.NodeInfoData.Children != nil { + resp = r.NodeInfoData.Children + } + + if r.NodeInfoData.NextCursor != "-1" { + // 递归获取下一页 + nextFiles, err := d.getFiles(dirId, nodeId, r.NodeInfoData.NextCursor) + if err != nil { + return nil, err + } + + resp = append(r.NodeInfoData.Children, nextFiles...) 
+ } + + return resp, err +} + +func (d *DoubaoShare) getShareOverview(shareId, cursor string) (resp []File, err error) { + return d.getShareOverviewWithHistory(shareId, cursor, make(map[string]bool)) +} + +func (d *DoubaoShare) getShareOverviewWithHistory(shareId, cursor string, cursorHistory map[string]bool) (resp []File, err error) { + var r NodeInfoResp + + var body = base.Json{ + "share_id": shareId, + } + // 如果有游标,则设置游标和大小 + if cursor != "" { + body["cursor"] = cursor + body["size"] = 50 + } else { + body["need_full_path"] = false + } + + _, err = d.request("/samantha/aispace/share/overview", http.MethodPost, func(req *resty.Request) { + req.SetBody(body) + }, &r) + if err != nil { + return nil, err + } + + if r.NodeInfoData.NodeList != nil { + resp = r.NodeInfoData.NodeList + } + + if r.NodeInfoData.NextCursor != "-1" { + // 检查游标是否重复出现,防止无限循环 + if cursorHistory[r.NodeInfoData.NextCursor] { + return resp, nil + } + + // 记录当前游标 + cursorHistory[r.NodeInfoData.NextCursor] = true + + // 递归获取下一页 + nextFiles, err := d.getShareOverviewWithHistory(shareId, r.NodeInfoData.NextCursor, cursorHistory) + if err != nil { + return nil, err + } + + resp = append(resp, nextFiles...) 
+ } + + return resp, nil +} + +func (d *DoubaoShare) initShareList() error { + if d.Addition.ShareIds == "" { + return fmt.Errorf("share_ids is empty") + } + + // 解析分享配置 + shareConfigs, rootShares, err := d._parseShareConfigs() + if err != nil { + return err + } + + // 检查路径冲突 + if err := d._detectPathConflicts(shareConfigs); err != nil { + return err + } + + // 构建树形结构 + rootMap := d._buildTreeStructure(shareConfigs, rootShares) + + // 提取顶级节点 + topLevelNodes := d._extractTopLevelNodes(rootMap, rootShares) + if len(topLevelNodes) == 0 { + return fmt.Errorf("no valid share_ids found") + } + + // 存储结果 + d.RootFiles = topLevelNodes + + return nil +} + +// 从配置中解析分享ID和路径 +func (d *DoubaoShare) _parseShareConfigs() (map[string]string, []string, error) { + shareConfigs := make(map[string]string) // 路径 -> 分享ID + rootShares := make([]string, 0) // 根目录显示的分享ID + + lines := strings.Split(strings.TrimSpace(d.Addition.ShareIds), "\n") + if len(lines) == 0 { + return nil, nil, fmt.Errorf("no share_ids found") + } + + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + + // 解析分享ID和路径 + parts := strings.Split(line, "|") + var shareId, sharePath string + + if len(parts) == 1 { + // 无路径分享,直接在根目录显示 + shareId = _extractShareId(parts[0]) + if shareId != "" { + rootShares = append(rootShares, shareId) + } + continue + } else if len(parts) >= 2 { + shareId = _extractShareId(parts[0]) + sharePath = strings.Trim(parts[1], "/") + } + + if shareId == "" { + log.Warnf("[doubao_share] Invalid Share_id Format: %s", line) + continue + } + + // 空路径也加入根目录显示 + if sharePath == "" { + rootShares = append(rootShares, shareId) + continue + } + + // 添加到路径映射 + shareConfigs[sharePath] = shareId + } + + return shareConfigs, rootShares, nil +} + +// 检测路径冲突 +func (d *DoubaoShare) _detectPathConflicts(shareConfigs map[string]string) error { + // 检查直接路径冲突 + pathToShareIds := make(map[string][]string) + for sharePath, id := range shareConfigs { + 
pathToShareIds[sharePath] = append(pathToShareIds[sharePath], id) + } + + for sharePath, ids := range pathToShareIds { + if len(ids) > 1 { + return fmt.Errorf("路径冲突: 路径 '%s' 被多个不同的分享ID使用: %s", + sharePath, strings.Join(ids, ", ")) + } + } + + // 检查层次冲突 + for path1, id1 := range shareConfigs { + for path2, id2 := range shareConfigs { + if path1 == path2 || id1 == id2 { + continue + } + + // 检查前缀冲突 + if strings.HasPrefix(path2, path1+"/") || strings.HasPrefix(path1, path2+"/") { + return fmt.Errorf("路径冲突: 路径 '%s' (ID: %s) 与路径 '%s' (ID: %s) 存在层次冲突", + path1, id1, path2, id2) + } + } + } + + return nil +} + +// 构建树形结构 +func (d *DoubaoShare) _buildTreeStructure(shareConfigs map[string]string, rootShares []string) map[string]*RootFileList { + rootMap := make(map[string]*RootFileList) + + // 添加所有分享节点 + for sharePath, shareId := range shareConfigs { + children := make([]RootFileList, 0) + rootMap[sharePath] = &RootFileList{ + ShareID: shareId, + VirtualPath: sharePath, + NodeInfo: NodeInfoData{}, + Child: &children, + } + } + + // 构建父子关系 + for sharePath, node := range rootMap { + if sharePath == "" { + continue + } + + pathParts := strings.Split(sharePath, "/") + if len(pathParts) > 1 { + parentPath := strings.Join(pathParts[:len(pathParts)-1], "/") + + // 确保所有父级路径都已创建 + _ensurePathExists(rootMap, parentPath) + + // 添加当前节点到父节点 + if parent, exists := rootMap[parentPath]; exists { + *parent.Child = append(*parent.Child, *node) + } + } + } + + return rootMap +} + +// 提取顶级节点 +func (d *DoubaoShare) _extractTopLevelNodes(rootMap map[string]*RootFileList, rootShares []string) []RootFileList { + var topLevelNodes []RootFileList + + // 添加根目录分享 + for _, shareId := range rootShares { + children := make([]RootFileList, 0) + topLevelNodes = append(topLevelNodes, RootFileList{ + ShareID: shareId, + VirtualPath: "", + NodeInfo: NodeInfoData{}, + Child: &children, + }) + } + + // 添加顶级目录 + for rootPath, node := range rootMap { + if rootPath == "" { + continue + } + + isTopLevel := true + 
pathParts := strings.Split(rootPath, "/") + + if len(pathParts) > 1 { + parentPath := strings.Join(pathParts[:len(pathParts)-1], "/") + if _, exists := rootMap[parentPath]; exists { + isTopLevel = false + } + } + + if isTopLevel { + topLevelNodes = append(topLevelNodes, *node) + } + } + + return topLevelNodes +} + +// 确保路径存在,创建所有必要的中间节点 +func _ensurePathExists(rootMap map[string]*RootFileList, path string) { + if path == "" { + return + } + + // 如果路径已存在,不需要再处理 + if _, exists := rootMap[path]; exists { + return + } + + // 创建当前路径节点 + children := make([]RootFileList, 0) + rootMap[path] = &RootFileList{ + ShareID: "", + VirtualPath: path, + NodeInfo: NodeInfoData{}, + Child: &children, + } + + // 处理父路径 + pathParts := strings.Split(path, "/") + if len(pathParts) > 1 { + parentPath := strings.Join(pathParts[:len(pathParts)-1], "/") + + // 确保父路径存在 + _ensurePathExists(rootMap, parentPath) + + // 将当前节点添加为父节点的子节点 + if parent, exists := rootMap[parentPath]; exists { + *parent.Child = append(*parent.Child, *rootMap[path]) + } + } +} + +// _extractShareId 从URL或直接ID中提取分享ID +func _extractShareId(input string) string { + input = strings.TrimSpace(input) + if strings.HasPrefix(input, "http") { + regex := regexp.MustCompile(`/drive/s/([a-zA-Z0-9]+)`) + if matches := regex.FindStringSubmatch(input); len(matches) > 1 { + return matches[1] + } + return "" + } + return input // 直接返回ID +} + +// _findRootFileByShareID 查找指定ShareID的配置 +func _findRootFileByShareID(rootFiles []RootFileList, shareID string) *RootFileList { + for i, rf := range rootFiles { + if rf.ShareID == shareID { + return &rootFiles[i] + } + if rf.Child != nil && len(*rf.Child) > 0 { + if found := _findRootFileByShareID(*rf.Child, shareID); found != nil { + return found + } + } + } + return nil +} + +// _findNodeByPath 查找指定路径的节点 +func _findNodeByPath(rootFiles []RootFileList, path string) *RootFileList { + for i, rf := range rootFiles { + if rf.VirtualPath == path { + return &rootFiles[i] + } + if rf.Child != nil && 
len(*rf.Child) > 0 { + if found := _findNodeByPath(*rf.Child, path); found != nil { + return found + } + } + } + return nil +} + +// _findShareByPath 根据路径查找分享和相对路径 +func _findShareByPath(rootFiles []RootFileList, path string) (*RootFileList, string) { + // 完全匹配或子路径匹配 + for i, rf := range rootFiles { + if rf.VirtualPath == path { + return &rootFiles[i], "" + } + + if rf.VirtualPath != "" && strings.HasPrefix(path, rf.VirtualPath+"/") { + relPath := strings.TrimPrefix(path, rf.VirtualPath+"/") + + // 先检查子节点 + if rf.Child != nil && len(*rf.Child) > 0 { + if child, childPath := _findShareByPath(*rf.Child, path); child != nil { + return child, childPath + } + } + + return &rootFiles[i], relPath + } + + // 递归检查子节点 + if rf.Child != nil && len(*rf.Child) > 0 { + if child, childPath := _findShareByPath(*rf.Child, path); child != nil { + return child, childPath + } + } + } + + // 检查根目录分享 + for i, rf := range rootFiles { + if rf.VirtualPath == "" && rf.ShareID != "" { + parts := strings.SplitN(path, "/", 2) + if len(parts) > 0 && parts[0] == rf.ShareID { + if len(parts) > 1 { + return &rootFiles[i], parts[1] + } + return &rootFiles[i], "" + } + } + } + + return nil, "" +} + +// _findShareAndPath 根据给定路径查找对应的ShareID和相对路径 +func (d *DoubaoShare) _findShareAndPath(dir model.Obj) (string, string, error) { + dirPath := dir.GetPath() + + // 如果是根目录,返回空值表示需要列出所有分享 + if dirPath == "/" || dirPath == "" { + return "", "", nil + } + + // 检查是否是 FileObject 类型,并获取 ShareID + if fo, ok := dir.(*FileObject); ok && fo.ShareID != "" { + // 直接使用对象中存储的 ShareID + // 计算相对路径(移除前导斜杠) + relativePath := strings.TrimPrefix(dirPath, "/") + + // 递归查找对应的 RootFile + found := _findRootFileByShareID(d.RootFiles, fo.ShareID) + if found != nil { + if found.VirtualPath != "" { + // 如果此分享配置了路径前缀,需要考虑相对路径的计算 + if strings.HasPrefix(relativePath, found.VirtualPath) { + return fo.ShareID, strings.TrimPrefix(relativePath, found.VirtualPath+"/"), nil + } + } + return fo.ShareID, relativePath, nil + } + + // 如果找不到对应的 
RootFile 配置,仍然使用对象中的 ShareID + return fo.ShareID, relativePath, nil + } + + // 移除开头的斜杠 + cleanPath := strings.TrimPrefix(dirPath, "/") + + // 先检查是否有直接匹配的根目录分享 + for _, rootFile := range d.RootFiles { + if rootFile.VirtualPath == "" && rootFile.ShareID != "" { + // 检查是否匹配当前路径的第一部分 + parts := strings.SplitN(cleanPath, "/", 2) + if len(parts) > 0 && parts[0] == rootFile.ShareID { + if len(parts) > 1 { + return rootFile.ShareID, parts[1], nil + } + return rootFile.ShareID, "", nil + } + } + } + + // 查找匹配此路径的分享或虚拟目录 + share, relPath := _findShareByPath(d.RootFiles, cleanPath) + if share != nil { + return share.ShareID, relPath, nil + } + + log.Warnf("[doubao_share] No matching share path found: %s", dirPath) + return "", "", fmt.Errorf("no matching share path found: %s", dirPath) +} + +// convertToFileObject 将File转换为FileObject +func (d *DoubaoShare) convertToFileObject(file File, shareId string, relativePath string) *FileObject { + // 构建文件对象 + obj := &FileObject{ + Object: model.Object{ + ID: file.ID, + Name: file.Name, + Size: file.Size, + Modified: time.Unix(file.UpdateTime, 0), + Ctime: time.Unix(file.CreateTime, 0), + IsFolder: file.NodeType == DirectoryType, + Path: path.Join(relativePath, file.Name), + }, + ShareID: shareId, + Key: file.Key, + NodeID: file.ID, + NodeType: file.NodeType, + } + + return obj +} + +// getFilesInPath 获取指定分享和路径下的文件 +func (d *DoubaoShare) getFilesInPath(ctx context.Context, shareId, nodeId, relativePath string) ([]model.Obj, error) { + var ( + files []File + err error + ) + + // 调用overview接口获取分享链接信息 nodeId + if nodeId == "" { + files, err = d.getShareOverview(shareId, "") + if err != nil { + return nil, fmt.Errorf("failed to get share link information: %w", err) + } + + result := make([]model.Obj, 0, len(files)) + for _, file := range files { + result = append(result, d.convertToFileObject(file, shareId, "/")) + } + + return result, nil + + } else { + files, err = d.getFiles(shareId, nodeId, "") + if err != nil { + return nil, 
fmt.Errorf("failed to get share file: %w", err) + } + + result := make([]model.Obj, 0, len(files)) + for _, file := range files { + result = append(result, d.convertToFileObject(file, shareId, path.Join("/", relativePath))) + } + + return result, nil + } +} + +// listRootDirectory 处理根目录的内容展示 +func (d *DoubaoShare) listRootDirectory(ctx context.Context) ([]model.Obj, error) { + objects := make([]model.Obj, 0) + + // 分组处理:直接显示的分享内容 vs 虚拟目录 + var directShareIDs []string + addedDirs := make(map[string]bool) + + // 处理所有根节点 + for _, rootFile := range d.RootFiles { + if rootFile.VirtualPath == "" && rootFile.ShareID != "" { + // 无路径分享,记录ShareID以便后续获取内容 + directShareIDs = append(directShareIDs, rootFile.ShareID) + } else { + // 有路径的分享,显示第一级目录 + parts := strings.SplitN(rootFile.VirtualPath, "/", 2) + firstLevel := parts[0] + + // 避免重复添加同名目录 + if _, exists := addedDirs[firstLevel]; exists { + continue + } + + // 创建虚拟目录对象 + obj := &FileObject{ + Object: model.Object{ + ID: "", + Name: firstLevel, + Modified: time.Now(), + Ctime: time.Now(), + IsFolder: true, + Path: path.Join("/", firstLevel), + }, + ShareID: rootFile.ShareID, + Key: "", + NodeID: "", + NodeType: DirectoryType, + } + objects = append(objects, obj) + addedDirs[firstLevel] = true + } + } + + // 处理直接显示的分享内容 + for _, shareID := range directShareIDs { + shareFiles, err := d.getFilesInPath(ctx, shareID, "", "") + if err != nil { + log.Warnf("[doubao_share] Failed to get list of files in share %s: %s", shareID, err) + continue + } + objects = append(objects, shareFiles...) 
+ } + + return objects, nil +} + +// listVirtualDirectoryContent 列出虚拟目录的内容 +func (d *DoubaoShare) listVirtualDirectoryContent(dir model.Obj) ([]model.Obj, error) { + dirPath := strings.TrimPrefix(dir.GetPath(), "/") + objects := make([]model.Obj, 0) + + // 递归查找此路径的节点 + node := _findNodeByPath(d.RootFiles, dirPath) + + if node != nil && node.Child != nil { + // 显示此节点的所有子节点 + for _, child := range *node.Child { + // 计算显示名称(取路径的最后一部分) + displayName := child.VirtualPath + if child.VirtualPath != "" { + parts := strings.Split(child.VirtualPath, "/") + displayName = parts[len(parts)-1] + } else if child.ShareID != "" { + displayName = child.ShareID + } + + obj := &FileObject{ + Object: model.Object{ + ID: "", + Name: displayName, + Modified: time.Now(), + Ctime: time.Now(), + IsFolder: true, + Path: path.Join("/", child.VirtualPath), + }, + ShareID: child.ShareID, + Key: "", + NodeID: "", + NodeType: DirectoryType, + } + objects = append(objects, obj) + } + } + + return objects, nil +} + +// generateContentDisposition 生成符合RFC 5987标准的Content-Disposition头部 +func generateContentDisposition(filename string) string { + // 按照RFC 2047进行编码,用于filename部分 + encodedName := urlEncode(filename) + + // 按照RFC 5987进行编码,用于filename*部分 + encodedNameRFC5987 := encodeRFC5987(filename) + + return fmt.Sprintf("attachment; filename=\"%s\"; filename*=utf-8''%s", + encodedName, encodedNameRFC5987) +} + +// encodeRFC5987 按照RFC 5987规范编码字符串,适用于HTTP头部参数中的非ASCII字符 +func encodeRFC5987(s string) string { + var buf strings.Builder + for _, r := range []byte(s) { + // 根据RFC 5987,只有字母、数字和部分特殊符号可以不编码 + if (r >= 'a' && r <= 'z') || + (r >= 'A' && r <= 'Z') || + (r >= '0' && r <= '9') || + r == '-' || r == '.' 
|| r == '_' || r == '~' { + buf.WriteByte(r) + } else { + // 其他字符都需要百分号编码 + fmt.Fprintf(&buf, "%%%02X", r) + } + } + return buf.String() +} + +func urlEncode(s string) string { + s = url.QueryEscape(s) + s = strings.ReplaceAll(s, "+", "%20") + return s +} From 28e5b5759ecade05072a2e4ee1df5c62bcfcf1d8 Mon Sep 17 00:00:00 2001 From: New Future Date: Sat, 19 Apr 2025 14:23:48 +0800 Subject: [PATCH 173/187] feat(azure_blob): implement GetRootId interface in Addition struct (#8389) fix failed get dir --- drivers/azure_blob/meta.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/azure_blob/meta.go b/drivers/azure_blob/meta.go index 8e42bdd6..b1e021b8 100644 --- a/drivers/azure_blob/meta.go +++ b/drivers/azure_blob/meta.go @@ -12,6 +12,11 @@ type Addition struct { SignURLExpire int `json:"sign_url_expire" type:"number" default:"4" help:"The expiration time for SAS URLs, in hours."` } +// implement GetRootId interface +func (r Addition) GetRootId() string { + return r.ContainerName +} + var config = driver.Config{ Name: "Azure Blob Storage", LocalSort: true, From 52d4e8ec47ba196f7c502e6541141b82a6d29097 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sat, 19 Apr 2025 14:24:43 +0800 Subject: [PATCH 174/187] fix(lanzou): remove JavaScript comments from response data (#8386) * feat(lanzou): add RemoveJSComment function to clean JavaScript comments from HTML * feat(lanzou): remove comments from share page data in getFilesByShareUrl function * fix(lanzou): optimize RemoveJSComment function to improve comment removal logic --- drivers/lanzou/help.go | 36 ++++++++++++++++++++++++++++++++++++ drivers/lanzou/util.go | 4 ++++ 2 files changed, 40 insertions(+) diff --git a/drivers/lanzou/help.go b/drivers/lanzou/help.go index 81d7c567..c3f5c6bb 100644 --- a/drivers/lanzou/help.go +++ b/drivers/lanzou/help.go @@ -78,6 +78,42 @@ func RemoveNotes(html string) string { }) } +// 清理JS注释 +func RemoveJSComment(data string) string { + var result strings.Builder + inComment := 
false + inSingleLineComment := false + + for i := 0; i < len(data); i++ { + v := data[i] + + if inSingleLineComment && (v == '\n' || v == '\r') { + inSingleLineComment = false + result.WriteByte(v) + continue + } + if inComment && v == '*' && i+1 < len(data) && data[i+1] == '/' { + inComment = false + continue + } + if v == '/' && i+1 < len(data) { + nextChar := data[i+1] + if nextChar == '*' { + inComment = true + i++ + continue + } else if nextChar == '/' { + inSingleLineComment = true + i++ + continue + } + } + result.WriteByte(v) + } + + return result.String() +} + var findAcwScV2Reg = regexp.MustCompile(`arg1='([0-9A-Z]+)'`) // 在页面被过多访问或其他情况下,有时候会先返回一个加密的页面,其执行计算出一个acw_sc__v2后放入页面后再重新访问页面才能获得正常页面 diff --git a/drivers/lanzou/util.go b/drivers/lanzou/util.go index 4b9959ad..e66252bc 100644 --- a/drivers/lanzou/util.go +++ b/drivers/lanzou/util.go @@ -348,6 +348,10 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) ( file FileOrFolderByShareUrl ) + // 删除注释 + sharePageData = RemoveNotes(sharePageData) + sharePageData = RemoveJSComment(sharePageData) + // 需要密码 if strings.Contains(sharePageData, "pwdload") || strings.Contains(sharePageData, "passwddiv") { sharePageData, err := getJSFunctionByName(sharePageData, "down_p") From b449312da83c67b154668d5835e44cbed8260c74 Mon Sep 17 00:00:00 2001 From: wxnq <49645495+yanjing19989@users.noreply.github.com> Date: Sat, 19 Apr 2025 14:26:19 +0800 Subject: [PATCH 175/187] fix(docker_release): avoid duplicate occupation in docker image (#8393 close #8388) * fix(ci): modify the method of adding permissions * fix(build): modify the method of adding permissions(to keep up with ci) --- Dockerfile | 7 +++---- Dockerfile.ci | 7 +++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0e2ee96f..f5e91bee 100644 --- a/Dockerfile +++ b/Dockerfile @@ -32,10 +32,9 @@ RUN apk update && \ /opt/aria2/.aria2/tracker.sh ; \ rm -rf /var/cache/apk/* -COPY --from=builder 
/app/bin/alist ./ -COPY entrypoint.sh /entrypoint.sh -RUN chmod +x /opt/alist/alist && \ - chmod +x /entrypoint.sh && /entrypoint.sh version +COPY --chmod=755 --from=builder /app/bin/alist ./ +COPY --chmod=755 entrypoint.sh /entrypoint.sh +RUN /entrypoint.sh version ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2} VOLUME /opt/alist/data/ diff --git a/Dockerfile.ci b/Dockerfile.ci index 25d502a9..a17aae9f 100644 --- a/Dockerfile.ci +++ b/Dockerfile.ci @@ -24,10 +24,9 @@ RUN apk update && \ /opt/aria2/.aria2/tracker.sh ; \ rm -rf /var/cache/apk/* -COPY /build/${TARGETPLATFORM}/alist ./ -COPY entrypoint.sh /entrypoint.sh -RUN chmod +x /opt/alist/alist && \ - chmod +x /entrypoint.sh && /entrypoint.sh version +COPY --chmod=755 /build/${TARGETPLATFORM}/alist ./ +COPY --chmod=755 entrypoint.sh /entrypoint.sh +RUN /entrypoint.sh version ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2} VOLUME /opt/alist/data/ From 8f89c55acaeb04a29bab59d225db13983c1082e0 Mon Sep 17 00:00:00 2001 From: Lin Tianchuan <47070449+1024th@users.noreply.github.com> Date: Sat, 19 Apr 2025 14:27:13 +0800 Subject: [PATCH 176/187] perf(local): avoid duplicate parsing of VideoThumbPos (#7812) * feat(local): support percent for video thumbnail The percentage determines the point in the video (as a percentage of the total duration) at which the thumbnail will be generated. 
* feat(local): support both time and percent for video thumbnail * refactor(local): avoid duplicate parsing of VideoThumbPos --- drivers/local/driver.go | 8 ++++++++ drivers/local/util.go | 16 ++++------------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/local/driver.go b/drivers/local/driver.go index 8a804ef3..faa2b3bd 100644 --- a/drivers/local/driver.go +++ b/drivers/local/driver.go @@ -35,6 +35,10 @@ type Local struct { // zero means no limit thumbConcurrency int thumbTokenBucket TokenBucket + + // video thumb position + videoThumbPos float64 + videoThumbPosIsPercentage bool } func (d *Local) Config() driver.Config { @@ -92,6 +96,8 @@ func (d *Local) Init(ctx context.Context) error { if val < 0 || val > 100 { return fmt.Errorf("invalid video_thumb_pos value: %s, the precentage must be a number between 0 and 100", d.VideoThumbPos) } + d.videoThumbPosIsPercentage = true + d.videoThumbPos = val / 100 } else { val, err := strconv.ParseFloat(d.VideoThumbPos, 64) if err != nil { @@ -100,6 +106,8 @@ func (d *Local) Init(ctx context.Context) error { if val < 0 { return fmt.Errorf("invalid video_thumb_pos value: %s, the time must be a positive number", d.VideoThumbPos) } + d.videoThumbPosIsPercentage = false + d.videoThumbPos = val } return nil } diff --git a/drivers/local/util.go b/drivers/local/util.go index d2fbd097..802f60cf 100644 --- a/drivers/local/util.go +++ b/drivers/local/util.go @@ -61,22 +61,14 @@ func (d *Local) GetSnapshot(videoPath string) (imgData *bytes.Buffer, err error) } var ss string - if strings.HasSuffix(d.VideoThumbPos, "%") { - percentage, err := strconv.ParseFloat(strings.TrimSuffix(d.VideoThumbPos, "%"), 64) - if err != nil { - return nil, err - } - ss = fmt.Sprintf("%f", totalDuration*percentage/100) + if d.videoThumbPosIsPercentage { + ss = fmt.Sprintf("%f", totalDuration*d.videoThumbPos) } else { - val, err := strconv.ParseFloat(d.VideoThumbPos, 64) - if err != nil { - return nil, err - } // If the value is 
greater than the total duration, use the total duration - if val > totalDuration { + if d.videoThumbPos > totalDuration { ss = fmt.Sprintf("%f", totalDuration) } else { - ss = d.VideoThumbPos + ss = fmt.Sprintf("%f", d.videoThumbPos) } } From 41bdab49aa8acca9e88862c3db55cd7a8a84ba6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sam-=20Pan=EF=BC=88=E6=BD=98=E7=BB=8D=E6=A3=AE=EF=BC=89?= Date: Sat, 19 Apr 2025 14:29:12 +0800 Subject: [PATCH 177/187] fix(139): incorrect host (#8368) * fix: correct new personal cloud path for 139Driver * Update drivers/139/driver.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fix bug --------- Co-authored-by: panshaosen <19802021493@139.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: j2rong4cn <253551464@qq.com> --- drivers/139/driver.go | 72 ++++++++++++++++++++++--------------------- drivers/139/types.go | 33 +++++++++++++++----- drivers/139/util.go | 72 ++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 130 insertions(+), 47 deletions(-) diff --git a/drivers/139/driver.go b/drivers/139/driver.go index 0af5a4f7..a57609bc 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -24,9 +24,10 @@ import ( type Yun139 struct { model.Storage Addition - cron *cron.Cron - Account string - ref *Yun139 + cron *cron.Cron + Account string + ref *Yun139 + PersonalCloudHost string } func (d *Yun139) Config() driver.Config { @@ -39,13 +40,36 @@ func (d *Yun139) GetAddition() driver.Additional { func (d *Yun139) Init(ctx context.Context) error { if d.ref == nil { - if d.Authorization == "" { + if len(d.Authorization) == 0 { return fmt.Errorf("authorization is empty") } err := d.refreshToken() if err != nil { return err } + + // Query Route Policy + var resp QueryRoutePolicyResp + _, err = d.requestRoute(base.Json{ + "userInfo": base.Json{ + "userType": 1, + "accountType": 1, + "accountName": d.Account}, + "modAddrType": 1, + }, &resp) + if err != nil { + return err + } 
+ for _, policyItem := range resp.Data.RoutePolicyList { + if policyItem.ModName == "personal" { + d.PersonalCloudHost = policyItem.HttpsUrl + break + } + } + if len(d.PersonalCloudHost) == 0 { + return fmt.Errorf("PersonalCloudHost is empty") + } + d.cron = cron.NewCron(time.Hour * 12) d.cron.Do(func() { err := d.refreshToken() @@ -71,28 +95,6 @@ func (d *Yun139) Init(ctx context.Context) error { default: return errs.NotImplement } - // if d.ref != nil { - // return nil - // } - // decode, err := base64.StdEncoding.DecodeString(d.Authorization) - // if err != nil { - // return err - // } - // decodeStr := string(decode) - // splits := strings.Split(decodeStr, ":") - // if len(splits) < 2 { - // return fmt.Errorf("authorization is invalid, splits < 2") - // } - // d.Account = splits[1] - // _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{ - // "qryUserExternInfoReq": base.Json{ - // "commonAccountInfo": base.Json{ - // "account": d.getAccount(), - // "accountType": 1, - // }, - // }, - // }, nil) - // return err return nil } @@ -160,7 +162,7 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin "type": "folder", "fileRenameMode": "force_rename", } - pathname := "/hcy/file/create" + pathname := "/file/create" _, err = d.personalPost(pathname, data, nil) case MetaPersonal: data := base.Json{ @@ -213,7 +215,7 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, "fileIds": []string{srcObj.GetID()}, "toParentFileId": dstDir.GetID(), } - pathname := "/hcy/file/batchMove" + pathname := "/file/batchMove" _, err := d.personalPost(pathname, data, nil) if err != nil { return nil, err @@ -290,7 +292,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e "name": newName, "description": "", } - pathname := "/hcy/file/update" + pathname := "/file/update" _, err = d.personalPost(pathname, data, nil) case MetaPersonal: var data base.Json @@ -390,7 +392,7 
@@ func (d *Yun139) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { "fileIds": []string{srcObj.GetID()}, "toParentFileId": dstDir.GetID(), } - pathname := "/hcy/file/batchCopy" + pathname := "/file/batchCopy" _, err := d.personalPost(pathname, data, nil) return err case MetaPersonal: @@ -430,7 +432,7 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error { data := base.Json{ "fileIds": []string{obj.GetID()}, } - pathname := "/hcy/recyclebin/batchTrash" + pathname := "/recyclebin/batchTrash" _, err := d.personalPost(pathname, data, nil) return err case MetaGroup: @@ -574,7 +576,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr "type": "file", "fileRenameMode": "auto_rename", } - pathname := "/hcy/file/create" + pathname := "/file/create" var resp PersonalUploadResp _, err = d.personalPost(pathname, data, &resp) if err != nil { @@ -611,7 +613,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr "accountType": 1, }, } - pathname := "/hcy/file/getUploadUrl" + pathname := "/file/getUploadUrl" var moreresp PersonalUploadUrlResp _, err = d.personalPost(pathname, moredata, &moreresp) if err != nil { @@ -662,7 +664,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr "fileId": resp.Data.FileId, "uploadId": resp.Data.UploadId, } - _, err = d.personalPost("/hcy/file/complete", data, nil) + _, err = d.personalPost("/file/complete", data, nil) if err != nil { return err } @@ -854,7 +856,7 @@ func (d *Yun139) Other(ctx context.Context, args model.OtherArgs) (interface{}, } switch args.Method { case "video_preview": - uri = "/hcy/videoPreview/getPreviewInfo" + uri = "/videoPreview/getPreviewInfo" default: return nil, errs.NotSupport } diff --git a/drivers/139/types.go b/drivers/139/types.go index 50ae1f81..d5f025a1 100644 --- a/drivers/139/types.go +++ b/drivers/139/types.go @@ -285,11 +285,30 @@ type PersonalUploadUrlResp struct { } } -type 
RefreshTokenResp struct { - XMLName xml.Name `xml:"root"` - Return string `xml:"return"` - Token string `xml:"token"` - Expiretime int32 `xml:"expiretime"` - AccessToken string `xml:"accessToken"` - Desc string `xml:"desc"` +type QueryRoutePolicyResp struct { + Success bool `json:"success"` + Code string `json:"code"` + Message string `json:"message"` + Data struct { + RoutePolicyList []struct { + SiteID string `json:"siteID"` + SiteCode string `json:"siteCode"` + ModName string `json:"modName"` + HttpUrl string `json:"httpUrl"` + HttpsUrl string `json:"httpsUrl"` + EnvID string `json:"envID"` + ExtInfo string `json:"extInfo"` + HashName string `json:"hashName"` + ModAddrType int `json:"modAddrType"` + } `json:"routePolicyList"` + } `json:"data"` +} + +type RefreshTokenResp struct { + XMLName xml.Name `xml:"root"` + Return string `xml:"return"` + Token string `xml:"token"` + Expiretime int32 `xml:"expiretime"` + AccessToken string `xml:"accessToken"` + Desc string `xml:"desc"` } diff --git a/drivers/139/util.go b/drivers/139/util.go index 53defef5..4b43e7d3 100644 --- a/drivers/139/util.go +++ b/drivers/139/util.go @@ -157,6 +157,64 @@ func (d *Yun139) request(pathname string, method string, callback base.ReqCallba } return res.Body(), nil } + +func (d *Yun139) requestRoute(data interface{}, resp interface{}) ([]byte, error) { + url := "https://user-njs.yun.139.com/user/route/qryRoutePolicy" + req := base.RestyClient.R() + randStr := random.String(16) + ts := time.Now().Format("2006-01-02 15:04:05") + callback := func(req *resty.Request) { + req.SetBody(data) + } + if callback != nil { + callback(req) + } + body, err := utils.Json.Marshal(req.Body) + if err != nil { + return nil, err + } + sign := calSign(string(body), ts, randStr) + svcType := "1" + if d.isFamily() { + svcType = "2" + } + req.SetHeaders(map[string]string{ + "Accept": "application/json, text/plain, */*", + "CMS-DEVICE": "default", + "Authorization": "Basic " + d.getAuthorization(), + 
"mcloud-channel": "1000101", + "mcloud-client": "10701", + //"mcloud-route": "001", + "mcloud-sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign), + //"mcloud-skey":"", + "mcloud-version": "7.14.0", + "Origin": "https://yun.139.com", + "Referer": "https://yun.139.com/w/", + "x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||", + "x-huawei-channelSrc": "10000034", + "x-inner-ntwk": "2", + "x-m4c-caller": "PC", + "x-m4c-src": "10002", + "x-SvcType": svcType, + "Inner-Hcy-Router-Https": "1", + }) + + var e BaseResp + req.SetResult(&e) + res, err := req.Execute(http.MethodPost, url) + log.Debugln(res.String()) + if !e.Success { + return nil, errors.New(e.Message) + } + if resp != nil { + err = utils.Json.Unmarshal(res.Body(), resp) + if err != nil { + return nil, err + } + } + return res.Body(), nil +} + func (d *Yun139) post(pathname string, data interface{}, resp interface{}) ([]byte, error) { return d.request(pathname, http.MethodPost, func(req *resty.Request) { req.SetBody(data) @@ -391,7 +449,7 @@ func unicode(str string) string { } func (d *Yun139) personalRequest(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { - url := "https://personal-kd-njs.yun.139.com" + pathname + url := d.getPersonalCloudHost() + pathname req := base.RestyClient.R() randStr := random.String(16) ts := time.Now().Format("2006-01-02 15:04:05") @@ -417,8 +475,6 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R "Mcloud-Route": "001", "Mcloud-Sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign), "Mcloud-Version": "7.14.0", - "Origin": "https://yun.139.com", - "Referer": "https://yun.139.com/w/", "x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||", "x-huawei-channelSrc": "10000034", "x-inner-ntwk": "2", @@ -480,7 +536,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) { "parentFileId": fileId, } var resp PersonalListResp - _, err := d.personalPost("/hcy/file/list", 
data, &resp) + _, err := d.personalPost("/file/list", data, &resp) if err != nil { return nil, err } @@ -528,7 +584,7 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) { data := base.Json{ "fileId": fileId, } - res, err := d.personalPost("/hcy/file/getDownloadUrl", + res, err := d.personalPost("/file/getDownloadUrl", data, nil) if err != nil { return "", err @@ -553,3 +609,9 @@ func (d *Yun139) getAccount() string { } return d.Account } +func (d *Yun139) getPersonalCloudHost() string { + if d.ref != nil { + return d.ref.getPersonalCloudHost() + } + return d.PersonalCloudHost +} From 17b42b9fa4ade8d237f407ae1e19c7cc8d7cb09d Mon Sep 17 00:00:00 2001 From: gdm257 <257@gdm.anonaddy.com> Date: Sun, 27 Apr 2025 20:56:04 +0900 Subject: [PATCH 178/187] fix(mega): use newest file for same filename (#8422 close #8344) Mega supports duplicate names but alist does not support. In `List()` method, driver will return multiple files with same name. That makes alist to use oldest version file for listing/downloading. So it is necessary to filter old same name files in a folder. After fixes, all CRUD work normally. 
Refs #8344 --- drivers/mega/driver.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/mega/driver.go b/drivers/mega/driver.go index f76bfeef..dc7b2201 100644 --- a/drivers/mega/driver.go +++ b/drivers/mega/driver.go @@ -56,12 +56,21 @@ func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([] if err != nil { return nil, err } - res := make([]model.Obj, 0) + fn := make(map[string]model.Obj) for i := range nodes { n := nodes[i] - if n.GetType() == mega.FILE || n.GetType() == mega.FOLDER { - res = append(res, &MegaNode{n}) + if n.GetType() != mega.FILE && n.GetType() != mega.FOLDER { + continue } + if _, ok := fn[n.GetName()]; !ok { + fn[n.GetName()] = &MegaNode{n} + } else if sameNameObj := fn[n.GetName()]; (&MegaNode{n}).ModTime().After(sameNameObj.ModTime()) { + fn[n.GetName()] = &MegaNode{n} + } + } + res := make([]model.Obj, 0) + for _, v := range fn { + res = append(res, v) } return res, nil } From bf0705ec172f53c1fb08d7ebda783c41f71757d5 Mon Sep 17 00:00:00 2001 From: Mmx <36563672+Mmx233@users.noreply.github.com> Date: Sun, 27 Apr 2025 19:56:34 +0800 Subject: [PATCH 179/187] fix: shebang of entrypoint.sh (#8408) --- entrypoint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/entrypoint.sh b/entrypoint.sh index 28a18d7d..c24ed6ee 100644 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh umask ${UMASK} From e532ab31efb58a9552381b19c483c65d1d7e560b Mon Sep 17 00:00:00 2001 From: Mmx <36563672+Mmx233@users.noreply.github.com> Date: Sun, 27 Apr 2025 19:58:09 +0800 Subject: [PATCH 180/187] fix: remove auth middleware for authn login (#8407) --- server/router.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/router.go b/server/router.go index 2dd6ee88..09a0bb44 100644 --- a/server/router.go +++ b/server/router.go @@ -77,10 +77,10 @@ func Init(e *gin.Engine) { api.GET("/auth/sso_get_token", handles.SSOLoginCallback) // 
webauthn + api.GET("/authn/webauthn_begin_login", handles.BeginAuthnLogin) + api.POST("/authn/webauthn_finish_login", handles.FinishAuthnLogin) webauthn.GET("/webauthn_begin_registration", handles.BeginAuthnRegistration) webauthn.POST("/webauthn_finish_registration", handles.FinishAuthnRegistration) - webauthn.GET("/webauthn_begin_login", handles.BeginAuthnLogin) - webauthn.POST("/webauthn_finish_login", handles.FinishAuthnLogin) webauthn.POST("/delete_authn", handles.DeleteAuthnLogin) webauthn.GET("/getcredentials", handles.GetAuthnCredentials) From 6d9c554f6f14a2e1471845e37e16a4ea7b157964 Mon Sep 17 00:00:00 2001 From: bigQY <52437374+bigQY@users.noreply.github.com> Date: Sun, 27 Apr 2025 19:58:45 +0800 Subject: [PATCH 181/187] feat: add UseLargeThumbnail for 139 (#8424) --- drivers/139/meta.go | 1 + drivers/139/util.go | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/139/meta.go b/drivers/139/meta.go index 866aadb4..c02b1347 100644 --- a/drivers/139/meta.go +++ b/drivers/139/meta.go @@ -13,6 +13,7 @@ type Addition struct { CloudID string `json:"cloud_id"` CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"` ReportRealSize bool `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"` + UseLargeThumbnail bool `json:"use_large_thumbnail" type:"bool" default:"false" help:"Enable to use large thumbnail for images"` } var config = driver.Config{ diff --git a/drivers/139/util.go b/drivers/139/util.go index 4b43e7d3..5adc39b4 100644 --- a/drivers/139/util.go +++ b/drivers/139/util.go @@ -556,7 +556,15 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) { } else { var Thumbnails = item.Thumbnails var ThumbnailUrl string - if len(Thumbnails) > 0 { + if d.UseLargeThumbnail { + for _, thumb := range Thumbnails { + if strings.Contains(thumb.Style, "Large") { + ThumbnailUrl = thumb.Url + break + } + } + } + if 
ThumbnailUrl == "" && len(Thumbnails) > 0 { ThumbnailUrl = Thumbnails[len(Thumbnails)-1].Url } f = &model.ObjThumb{ From f541489d7d733c30f62f13ecf5cbf70783059e2a Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sun, 27 Apr 2025 19:59:30 +0800 Subject: [PATCH 182/187] fix(netease_music): change ListResp size fields from string to int64 (#8417) --- drivers/netease_music/types.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/netease_music/types.go b/drivers/netease_music/types.go index 12afeb7a..93ecdf70 100644 --- a/drivers/netease_music/types.go +++ b/drivers/netease_music/types.go @@ -2,13 +2,13 @@ package netease_music import ( "context" - "github.com/alist-org/alist/v3/internal/driver" "io" "net/http" "strconv" "strings" "time" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/pkg/http_range" @@ -28,8 +28,8 @@ type SongResp struct { } type ListResp struct { - Size string `json:"size"` - MaxSize string `json:"maxSize"` + Size int64 `json:"size"` + MaxSize int64 `json:"maxSize"` Data []struct { AddTime int64 `json:"addTime"` FileName string `json:"fileName"` From b2b91a92814487f52125d17691963bd19bfb6713 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sun, 27 Apr 2025 20:00:25 +0800 Subject: [PATCH 183/187] feat(doubao): add get_download_info API and download_api option (#8428) --- drivers/doubao/driver.go | 64 +++++++++++++++++++++++++--------------- drivers/doubao/meta.go | 1 + drivers/doubao/types.go | 14 ++++++++- 3 files changed, 55 insertions(+), 24 deletions(-) diff --git a/drivers/doubao/driver.go b/drivers/doubao/driver.go index a066feee..0d421946 100644 --- a/drivers/doubao/driver.go +++ b/drivers/doubao/driver.go @@ -3,6 +3,11 @@ package doubao import ( "context" "errors" + "net/http" + "strconv" + "strings" + "time" + "github.com/alist-org/alist/v3/drivers/base" 
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -10,10 +15,6 @@ import ( "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" "github.com/google/uuid" - "net/http" - "strconv" - "strings" - "time" ) type Doubao struct { @@ -97,33 +98,50 @@ func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) var downloadUrl string if u, ok := file.(*Object); ok { - switch u.NodeType { - case VideoType, AudioType: - var r GetVideoFileUrlResp - _, err := d.request("/samantha/media/get_play_info", http.MethodPost, func(req *resty.Request) { + switch d.DownloadApi { + case "get_download_info": + var r GetDownloadInfoResp + _, err := d.request("/samantha/aispace/get_download_info", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ - "key": u.Key, - "node_id": file.GetID(), + "requests": []base.Json{{"node_id": file.GetID()}}, }) }, &r) if err != nil { return nil, err } - downloadUrl = r.Data.OriginalMediaInfo.MainURL + downloadUrl = r.Data.DownloadInfos[0].MainURL + case "get_file_url": + switch u.NodeType { + case VideoType, AudioType: + var r GetVideoFileUrlResp + _, err := d.request("/samantha/media/get_play_info", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "key": u.Key, + "node_id": file.GetID(), + }) + }, &r) + if err != nil { + return nil, err + } + + downloadUrl = r.Data.OriginalMediaInfo.MainURL + default: + var r GetFileUrlResp + _, err := d.request("/alice/message/get_file_url", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{u.Key}, + "type": FileNodeType[u.NodeType], + }) + }, &r) + if err != nil { + return nil, err + } + + downloadUrl = r.Data.FileUrls[0].MainURL + } default: - var r GetFileUrlResp - _, err := d.request("/alice/message/get_file_url", http.MethodPost, func(req *resty.Request) { - req.SetBody(base.Json{ - "uris": []string{u.Key}, - "type": FileNodeType[u.NodeType], - }) - }, &r) - 
if err != nil { - return nil, err - } - - downloadUrl = r.Data.FileUrls[0].MainURL + return nil, errs.NotImplement } // 生成标准的Content-Disposition diff --git a/drivers/doubao/meta.go b/drivers/doubao/meta.go index c3d8eb34..7735e5ff 100644 --- a/drivers/doubao/meta.go +++ b/drivers/doubao/meta.go @@ -12,6 +12,7 @@ type Addition struct { // define other Cookie string `json:"cookie" type:"text"` UploadThread string `json:"upload_thread" default:"3"` + DownloadApi string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"` } var config = driver.Config{ diff --git a/drivers/doubao/types.go b/drivers/doubao/types.go index 4264eb7d..ae747f88 100644 --- a/drivers/doubao/types.go +++ b/drivers/doubao/types.go @@ -3,8 +3,9 @@ package doubao import ( "encoding/json" "fmt" - "github.com/alist-org/alist/v3/internal/model" "time" + + "github.com/alist-org/alist/v3/internal/model" ) type BaseResp struct { @@ -38,6 +39,17 @@ type File struct { UpdateTime int64 `json:"update_time"` } +type GetDownloadInfoResp struct { + BaseResp + Data struct { + DownloadInfos []struct { + NodeID string `json:"node_id"` + MainURL string `json:"main_url"` + BackupURL string `json:"backup_url"` + } `json:"download_infos"` + } `json:"data"` +} + type GetFileUrlResp struct { BaseResp Data struct { From 11e7284824e2dfa5eb4517cd0ee5a61923b1746c Mon Sep 17 00:00:00 2001 From: yoclo <147054286+yclw@users.noreply.github.com> Date: Tue, 29 Apr 2025 23:14:16 +0800 Subject: [PATCH 184/187] fix: prevent guest user from updating profile (#8447) --- server/handles/auth.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/handles/auth.go b/server/handles/auth.go index e1f512c4..7a2c0fb5 100644 --- a/server/handles/auth.go +++ b/server/handles/auth.go @@ -113,6 +113,10 @@ func UpdateCurrent(c *gin.Context) { return } user := c.MustGet("user").(*model.User) + if user.IsGuest() { + common.ErrorStrResp(c, "Guest user can not update profile", 403) + return + } 
user.Username = req.Username if req.Password != "" { user.SetPassword(req.Password) From bc5117fa4f5f8d46a4ba200fb6f9f0b2c18010e7 Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Fri, 2 May 2025 16:53:39 +0800 Subject: [PATCH 185/187] fix(115_open): add delay in MakeDir function to handle rate limiting --- drivers/115_open/driver.go | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/115_open/driver.go b/drivers/115_open/driver.go index 0eb943ac..3826c78f 100644 --- a/drivers/115_open/driver.go +++ b/drivers/115_open/driver.go @@ -117,6 +117,7 @@ func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri if err != nil { return nil, err } + time.Sleep(800 * time.Millisecond) return &Obj{ Fid: resp.FileID, Pid: parentDir.GetID(), From 630cf30af5544359a7ccb98ff6c844c643049df0 Mon Sep 17 00:00:00 2001 From: Andy Hsu Date: Sun, 11 May 2025 13:39:32 +0800 Subject: [PATCH 186/187] feat(115_open): implement rate limiting for API requests --- drivers/115_open/driver.go | 39 ++++++++++++++++++++++++++++++++++++-- drivers/115_open/meta.go | 7 ++++--- 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/drivers/115_open/driver.go b/drivers/115_open/driver.go index 3826c78f..6121d3b2 100644 --- a/drivers/115_open/driver.go +++ b/drivers/115_open/driver.go @@ -16,12 +16,14 @@ import ( "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/utils" sdk "github.com/xhofe/115-sdk-go" + "golang.org/x/time/rate" ) type Open115 struct { model.Storage Addition - client *sdk.Client + client *sdk.Client + limiter *rate.Limiter } func (d *Open115) Config() driver.Config { @@ -47,6 +49,16 @@ func (d *Open115) Init(ctx context.Context) error { if err != nil { return err } + if d.Addition.LimitRate > 0 { + d.limiter = rate.NewLimiter(rate.Limit(d.Addition.LimitRate), 1) + } + return nil +} + +func (d *Open115) WaitLimit(ctx context.Context) error { + if d.limiter != nil { + return d.limiter.Wait(ctx) + } return nil } @@ -59,6 +71,9 @@ 
func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs) pageSize := int64(200) offset := int64(0) for { + if err := d.WaitLimit(ctx); err != nil { + return nil, err + } resp, err := d.client.GetFiles(ctx, &sdk.GetFilesReq{ CID: dir.GetID(), Limit: pageSize, @@ -84,6 +99,9 @@ func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs) } func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + if err := d.WaitLimit(ctx); err != nil { + return nil, err + } var ua string if args.Header != nil { ua = args.Header.Get("User-Agent") @@ -113,11 +131,13 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) } func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + if err := d.WaitLimit(ctx); err != nil { + return nil, err + } resp, err := d.client.Mkdir(ctx, parentDir.GetID(), dirName) if err != nil { return nil, err } - time.Sleep(800 * time.Millisecond) return &Obj{ Fid: resp.FileID, Pid: parentDir.GetID(), @@ -130,6 +150,9 @@ func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri } func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + if err := d.WaitLimit(ctx); err != nil { + return nil, err + } _, err := d.client.Move(ctx, &sdk.MoveReq{ FileIDs: srcObj.GetID(), ToCid: dstDir.GetID(), @@ -141,6 +164,9 @@ func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj } func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + if err := d.WaitLimit(ctx); err != nil { + return nil, err + } _, err := d.client.UpdateFile(ctx, &sdk.UpdateFileReq{ FileID: srcObj.GetID(), FileNma: newName, @@ -156,6 +182,9 @@ func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) } func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + if 
err := d.WaitLimit(ctx); err != nil { + return nil, err + } _, err := d.client.Copy(ctx, &sdk.CopyReq{ PID: dstDir.GetID(), FileID: srcObj.GetID(), @@ -168,6 +197,9 @@ func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj } func (d *Open115) Remove(ctx context.Context, obj model.Obj) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } _obj, ok := obj.(*Obj) if !ok { return fmt.Errorf("can't convert obj") @@ -183,6 +215,9 @@ func (d *Open115) Remove(ctx context.Context, obj model.Obj) error { } func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } tempF, err := file.CacheFullInTempFile() if err != nil { return err diff --git a/drivers/115_open/meta.go b/drivers/115_open/meta.go index 7e26e0dd..66b956c0 100644 --- a/drivers/115_open/meta.go +++ b/drivers/115_open/meta.go @@ -9,9 +9,10 @@ type Addition struct { // Usually one of two driver.RootID // define other - RefreshToken string `json:"refresh_token" required:"true"` - OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"` - OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"` + RefreshToken string `json:"refresh_token" required:"true"` + OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"` + OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"` + LimitRate float64 `json:"limit_rate" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"` AccessToken string } From ffa03bfda11aa18bb899afc1f29e8690fcea1036 Mon Sep 17 00:00:00 2001 From: MadDogOwner Date: Sat, 24 May 2025 13:38:43 +0800 Subject: [PATCH 187/187] feat(cloudreve_v4): add Cloudreve V4 driver (#8470 closes #8328 #8467) * feat(cloudreve_v4): add Cloudreve V4 driver implementation * fix(cloudreve_v4): update request handling to 
prevent token refresh loop * feat(onedrive): implement retry logic for upload failures * feat(cloudreve): implement retry logic for upload failures * feat(cloudreve_v4): support cloud sorting * fix(cloudreve_v4): improve token handling in Init method * feat(cloudreve_v4): support share * feat(cloudreve): support reference * feat(cloudreve_v4): support version upload * fix(cloudreve_v4): add SetBody in upLocal * fix(cloudreve_v4): update URL structure in Link and FileUrlResp --- drivers/all.go | 1 + drivers/cloudreve/driver.go | 11 + drivers/cloudreve/util.go | 157 ++++++++--- drivers/cloudreve_v4/driver.go | 305 +++++++++++++++++++++ drivers/cloudreve_v4/meta.go | 44 +++ drivers/cloudreve_v4/types.go | 164 ++++++++++++ drivers/cloudreve_v4/util.go | 476 +++++++++++++++++++++++++++++++++ drivers/onedrive/util.go | 33 ++- drivers/onedrive_app/util.go | 33 ++- 9 files changed, 1158 insertions(+), 66 deletions(-) create mode 100644 drivers/cloudreve_v4/driver.go create mode 100644 drivers/cloudreve_v4/meta.go create mode 100644 drivers/cloudreve_v4/types.go create mode 100644 drivers/cloudreve_v4/util.go diff --git a/drivers/all.go b/drivers/all.go index 0b8ce3aa..224fb8dd 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -22,6 +22,7 @@ import ( _ "github.com/alist-org/alist/v3/drivers/baidu_share" _ "github.com/alist-org/alist/v3/drivers/chaoxing" _ "github.com/alist-org/alist/v3/drivers/cloudreve" + _ "github.com/alist-org/alist/v3/drivers/cloudreve_v4" _ "github.com/alist-org/alist/v3/drivers/crypt" _ "github.com/alist-org/alist/v3/drivers/doubao" _ "github.com/alist-org/alist/v3/drivers/doubao_share" diff --git a/drivers/cloudreve/driver.go b/drivers/cloudreve/driver.go index 8c2321b8..dcde58c6 100644 --- a/drivers/cloudreve/driver.go +++ b/drivers/cloudreve/driver.go @@ -18,6 +18,7 @@ import ( type Cloudreve struct { model.Storage Addition + ref *Cloudreve } func (d *Cloudreve) Config() driver.Config { @@ -37,8 +38,18 @@ func (d *Cloudreve) Init(ctx 
context.Context) error { return d.login() } +func (d *Cloudreve) InitReference(storage driver.Driver) error { + refStorage, ok := storage.(*Cloudreve) + if ok { + d.ref = refStorage + return nil + } + return errs.NotSupport +} + func (d *Cloudreve) Drop(ctx context.Context) error { d.Cookie = "" + d.ref = nil return nil } diff --git a/drivers/cloudreve/util.go b/drivers/cloudreve/util.go index 196d7303..5054de6c 100644 --- a/drivers/cloudreve/util.go +++ b/drivers/cloudreve/util.go @@ -4,12 +4,14 @@ import ( "bytes" "context" "encoding/base64" + "encoding/json" "errors" "fmt" "io" "net/http" "strconv" "strings" + "time" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" @@ -19,7 +21,6 @@ import ( "github.com/alist-org/alist/v3/pkg/cookie" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" - json "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go" ) @@ -35,6 +36,9 @@ func (d *Cloudreve) getUA() string { } func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error { + if d.ref != nil { + return d.ref.request(method, path, callback, out) + } u := d.Address + "/api/v3" + path req := base.RestyClient.R() req.SetHeaders(map[string]string{ @@ -79,11 +83,11 @@ func (d *Cloudreve) request(method string, path string, callback base.ReqCallbac } if out != nil && r.Data != nil { var marshal []byte - marshal, err = json.Marshal(r.Data) + marshal, err = jsoniter.Marshal(r.Data) if err != nil { return err } - err = json.Unmarshal(marshal, out) + err = jsoniter.Unmarshal(marshal, out) if err != nil { return err } @@ -187,12 +191,9 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up if utils.IsCanceled(ctx) { return ctx.Err() } - utils.Log.Debugf("[Cloudreve-Local] upload: %d", finish) - var byteSize = DEFAULT left := stream.GetSize() - finish - if left < DEFAULT { - byteSize = left - } + byteSize := min(left, DEFAULT) + 
utils.Log.Debugf("[Cloudreve-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()) byteData := make([]byte, byteSize) n, err := io.ReadFull(stream, byteData) utils.Log.Debug(err, n) @@ -205,9 +206,26 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10)) req.SetHeader("User-Agent", d.getUA()) req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) + req.AddRetryCondition(func(r *resty.Response, err error) bool { + if err != nil { + return true + } + if r.IsError() { + return true + } + var retryResp Resp + jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp) + if jErr != nil { + return true + } + if retryResp.Code != 0 { + return true + } + return false + }) }, nil) if err != nil { - break + return err } finish += byteSize up(float64(finish) * 100 / float64(stream.GetSize())) @@ -222,16 +240,15 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U var finish int64 = 0 var chunk int = 0 DEFAULT := int64(u.ChunkSize) + retryCount := 0 + maxRetries := 3 for finish < stream.GetSize() { if utils.IsCanceled(ctx) { return ctx.Err() } - utils.Log.Debugf("[Cloudreve-Remote] upload: %d", finish) - var byteSize = DEFAULT left := stream.GetSize() - finish - if left < DEFAULT { - byteSize = left - } + byteSize := min(left, DEFAULT) + utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()) byteData := make([]byte, byteSize) n, err := io.ReadFull(stream, byteData) utils.Log.Debug(err, n) @@ -248,14 +265,43 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) req.Header.Set("Authorization", fmt.Sprint(credential)) req.Header.Set("User-Agent", d.getUA()) - finish += byteSize - res, err := base.HttpClient.Do(req) - if err != nil { - return err + err = func() error { + 
res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return errors.New(res.Status) + } + body, err := io.ReadAll(res.Body) + if err != nil { + return err + } + var up Resp + err = json.Unmarshal(body, &up) + if err != nil { + return err + } + if up.Code != 0 { + return errors.New(up.Msg) + } + return nil + }() + if err == nil { + retryCount = 0 + finish += byteSize + up(float64(finish) * 100 / float64(stream.GetSize())) + chunk++ + } else { + retryCount++ + if retryCount > maxRetries { + return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err) + } + backoff := time.Duration(1<= 500 && res.StatusCode <= 504: + retryCount++ + if retryCount > maxRetries { + res.Body.Close() + return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) + } + backoff := time.Duration(1< maxRetries { + return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) + } + backoff := time.Duration(1< 0 { + src.Size = ds.FolderSummary.Size + } + } + var thumb model.Thumbnail + if d.EnableThumb && src.Type == 0 { + var t FileThumbResp + err := d.request(http.MethodGet, "/file/thumb", func(req *resty.Request) { + req.SetQueryParam("uri", src.Path) + }, &t) + if err == nil && t.URL != "" { + thumb = model.Thumbnail{ + Thumbnail: t.URL, + } + } + } + return &model.ObjThumb{ + Object: model.Object{ + ID: src.ID, + Path: src.Path, + Name: src.Name, + Size: src.Size, + Modified: src.UpdatedAt, + Ctime: src.CreatedAt, + IsFolder: src.Type == 1, + }, + Thumbnail: thumb, + }, nil + }) +} + +func (d *CloudreveV4) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + var url FileUrlResp + err := d.request(http.MethodPost, "/file/url", func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{file.GetPath()}, + "download": true, + }) + }, &url) 
+ if err != nil { + return nil, err + } + if len(url.Urls) == 0 { + return nil, errors.New("server returns no url") + } + exp := time.Until(url.Expires) + return &model.Link{ + URL: url.Urls[0].URL, + Expiration: &exp, + }, nil +} + +func (d *CloudreveV4) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + return d.request(http.MethodPost, "/file/create", func(req *resty.Request) { + req.SetBody(base.Json{ + "type": "folder", + "uri": parentDir.GetPath() + "/" + dirName, + "error_on_conflict": true, + }) + }, nil) +} + +func (d *CloudreveV4) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + return d.request(http.MethodPost, "/file/move", func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{srcObj.GetPath()}, + "dst": dstDir.GetPath(), + "copy": false, + }) + }, nil) +} + +func (d *CloudreveV4) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + return d.request(http.MethodPost, "/file/create", func(req *resty.Request) { + req.SetBody(base.Json{ + "new_name": newName, + "uri": srcObj.GetPath(), + }) + }, nil) + +} + +func (d *CloudreveV4) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + return d.request(http.MethodPost, "/file/move", func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{srcObj.GetPath()}, + "dst": dstDir.GetPath(), + "copy": true, + }) + }, nil) +} + +func (d *CloudreveV4) Remove(ctx context.Context, obj model.Obj) error { + return d.request(http.MethodDelete, "/file", func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{obj.GetPath()}, + "unlink": false, + "skip_soft_delete": true, + }) + }, nil) +} + +func (d *CloudreveV4) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + if file.GetSize() == 0 { + // 空文件使用新建文件方法,避免上传卡锁 + return d.request(http.MethodPost, "/file/create", func(req *resty.Request) { + req.SetBody(base.Json{ + "type": "file", + "uri": dstDir.GetPath() + "/" + 
file.GetName(), + "error_on_conflict": true, + }) + }, nil) + } + var p StoragePolicy + var r FileResp + var u FileUploadResp + var err error + params := map[string]string{ + "page_size": "10", + "uri": dstDir.GetPath(), + "order_by": "created_at", + "order_direction": "asc", + "page": "0", + } + err = d.request(http.MethodGet, "/file", func(req *resty.Request) { + req.SetQueryParams(params) + }, &r) + if err != nil { + return err + } + p = r.StoragePolicy + body := base.Json{ + "uri": dstDir.GetPath() + "/" + file.GetName(), + "size": file.GetSize(), + "policy_id": p.ID, + "last_modified": file.ModTime().UnixMilli(), + "mime_type": "", + } + if d.EnableVersionUpload { + body["entity_type"] = "version" + } + err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) { + req.SetBody(body) + }, &u) + if err != nil { + return err + } + if u.StoragePolicy.Relay { + err = d.upLocal(ctx, file, u, up) + } else { + switch u.StoragePolicy.Type { + case "local": + err = d.upLocal(ctx, file, u, up) + case "remote": + err = d.upRemote(ctx, file, u, up) + case "onedrive": + err = d.upOneDrive(ctx, file, u, up) + case "s3": + err = d.upS3(ctx, file, u, up) + default: + return errs.NotImplement + } + } + if err != nil { + // 删除失败的会话 + _ = d.request(http.MethodDelete, "/file/upload", func(req *resty.Request) { + req.SetBody(base.Json{ + "id": u.SessionID, + "uri": u.URI, + }) + }, nil) + return err + } + return nil +} + +func (d *CloudreveV4) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *CloudreveV4) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { + // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d 
*CloudreveV4) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) { + // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional + // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir + // return errs.NotImplement to use an internal archive tool + return nil, errs.NotImplement +} + +//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { +// return nil, errs.NotSupport +//} + +var _ driver.Driver = (*CloudreveV4)(nil) diff --git a/drivers/cloudreve_v4/meta.go b/drivers/cloudreve_v4/meta.go new file mode 100644 index 00000000..bfaa14f8 --- /dev/null +++ b/drivers/cloudreve_v4/meta.go @@ -0,0 +1,44 @@ +package cloudreve_v4 + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + // Usually one of two + driver.RootPath + // driver.RootID + // define other + Address string `json:"address" required:"true"` + Username string `json:"username"` + Password string `json:"password"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + CustomUA string `json:"custom_ua"` + EnableFolderSize bool `json:"enable_folder_size"` + EnableThumb bool `json:"enable_thumb"` + EnableVersionUpload bool `json:"enable_version_upload"` + OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at" default:"name" required:"true"` + OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc" required:"true"` +} + +var config = 
driver.Config{ + Name: "Cloudreve V4", + LocalSort: false, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: false, + NeedMs: false, + DefaultRoot: "cloudreve://my", + CheckStatus: true, + Alert: "", + NoOverwriteUpload: true, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &CloudreveV4{} + }) +} diff --git a/drivers/cloudreve_v4/types.go b/drivers/cloudreve_v4/types.go new file mode 100644 index 00000000..e81226d3 --- /dev/null +++ b/drivers/cloudreve_v4/types.go @@ -0,0 +1,164 @@ +package cloudreve_v4 + +import ( + "time" + + "github.com/alist-org/alist/v3/internal/model" +) + +type Object struct { + model.Object + StoragePolicy StoragePolicy +} + +type Resp struct { + Code int `json:"code"` + Msg string `json:"msg"` + Data any `json:"data"` +} + +type BasicConfigResp struct { + InstanceID string `json:"instance_id"` + // Title string `json:"title"` + // Themes string `json:"themes"` + // DefaultTheme string `json:"default_theme"` + User struct { + ID string `json:"id"` + // Nickname string `json:"nickname"` + // CreatedAt time.Time `json:"created_at"` + // Anonymous bool `json:"anonymous"` + Group struct { + ID string `json:"id"` + Name string `json:"name"` + Permission string `json:"permission"` + } `json:"group"` + } `json:"user"` + // Logo string `json:"logo"` + // LogoLight string `json:"logo_light"` + // CaptchaReCaptchaKey string `json:"captcha_ReCaptchaKey"` + CaptchaType string `json:"captcha_type"` // support 'normal' only + // AppPromotion bool `json:"app_promotion"` +} + +type SiteLoginConfigResp struct { + LoginCaptcha bool `json:"login_captcha"` + Authn bool `json:"authn"` +} + +type PrepareLoginResp struct { + WebauthnEnabled bool `json:"webauthn_enabled"` + PasswordEnabled bool `json:"password_enabled"` +} + +type CaptchaResp struct { + Image string `json:"image"` + Ticket string `json:"ticket"` +} + +type Token struct { + AccessToken string `json:"access_token"` + RefreshToken string 
`json:"refresh_token"` + AccessExpires time.Time `json:"access_expires"` + RefreshExpires time.Time `json:"refresh_expires"` +} + +type TokenResponse struct { + User struct { + ID string `json:"id"` + // Email string `json:"email"` + // Nickname string `json:"nickname"` + Status string `json:"status"` + // CreatedAt time.Time `json:"created_at"` + Group struct { + ID string `json:"id"` + Name string `json:"name"` + Permission string `json:"permission"` + // DirectLinkBatchSize int `json:"direct_link_batch_size"` + // TrashRetention int `json:"trash_retention"` + } `json:"group"` + // Language string `json:"language"` + } `json:"user"` + Token Token `json:"token"` +} + +type File struct { + Type int `json:"type"` // 0: file, 1: folder + ID string `json:"id"` + Name string `json:"name"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Size int64 `json:"size"` + Metadata interface{} `json:"metadata"` + Path string `json:"path"` + Capability string `json:"capability"` + Owned bool `json:"owned"` + PrimaryEntity string `json:"primary_entity"` +} + +type StoragePolicy struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + MaxSize int64 `json:"max_size"` + Relay bool `json:"relay,omitempty"` +} + +type Pagination struct { + Page int `json:"page"` + PageSize int `json:"page_size"` + IsCursor bool `json:"is_cursor"` + NextToken string `json:"next_token,omitempty"` +} + +type Props struct { + Capability string `json:"capability"` + MaxPageSize int `json:"max_page_size"` + OrderByOptions []string `json:"order_by_options"` + OrderDirectionOptions []string `json:"order_direction_options"` +} + +type FileResp struct { + Files []File `json:"files"` + Parent File `json:"parent"` + Pagination Pagination `json:"pagination"` + Props Props `json:"props"` + ContextHint string `json:"context_hint"` + MixedType bool `json:"mixed_type"` + StoragePolicy StoragePolicy `json:"storage_policy"` +} + +type FileUrlResp 
struct { + Urls []struct { + URL string `json:"url"` + } `json:"urls"` + Expires time.Time `json:"expires"` +} + +type FileUploadResp struct { + // UploadID string `json:"upload_id"` + SessionID string `json:"session_id"` + ChunkSize int64 `json:"chunk_size"` + Expires int64 `json:"expires"` + StoragePolicy StoragePolicy `json:"storage_policy"` + URI string `json:"uri"` + CompleteURL string `json:"completeURL,omitempty"` // for S3-like + CallbackSecret string `json:"callback_secret,omitempty"` // for S3-like, OneDrive + UploadUrls []string `json:"upload_urls,omitempty"` // for not-local + Credential string `json:"credential,omitempty"` // for local +} + +type FileThumbResp struct { + URL string `json:"url"` + Expires time.Time `json:"expires"` +} + +type FolderSummaryResp struct { + File + FolderSummary struct { + Size int64 `json:"size"` + Files int64 `json:"files"` + Folders int64 `json:"folders"` + Completed bool `json:"completed"` + CalculatedAt time.Time `json:"calculated_at"` + } `json:"folder_summary"` +} diff --git a/drivers/cloudreve_v4/util.go b/drivers/cloudreve_v4/util.go new file mode 100644 index 00000000..cf2337f2 --- /dev/null +++ b/drivers/cloudreve_v4/util.go @@ -0,0 +1,476 @@ +package cloudreve_v4 + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/go-resty/resty/v2" + jsoniter "github.com/json-iterator/go" +) + +// do others that not defined in Driver interface + +func (d *CloudreveV4) getUA() string { + if d.CustomUA != "" { + return d.CustomUA + } + return base.UserAgent +} + +func (d *CloudreveV4) 
request(method string, path string, callback base.ReqCallback, out any) error { + if d.ref != nil { + return d.ref.request(method, path, callback, out) + } + u := d.Address + "/api/v4" + path + req := base.RestyClient.R() + req.SetHeaders(map[string]string{ + "Accept": "application/json, text/plain, */*", + "User-Agent": d.getUA(), + }) + if d.AccessToken != "" { + req.SetHeader("Authorization", "Bearer "+d.AccessToken) + } + + var r Resp + req.SetResult(&r) + + if callback != nil { + callback(req) + } + + resp, err := req.Execute(method, u) + if err != nil { + return err + } + if !resp.IsSuccess() { + return errors.New(resp.String()) + } + + if r.Code != 0 { + if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" { + // try to refresh token + err = d.refreshToken() + if err != nil { + return err + } + return d.request(method, path, callback, out) + } + return errors.New(r.Msg) + } + + if out != nil && r.Data != nil { + var marshal []byte + marshal, err = json.Marshal(r.Data) + if err != nil { + return err + } + err = json.Unmarshal(marshal, out) + if err != nil { + return err + } + } + + return nil +} + +func (d *CloudreveV4) login() error { + var siteConfig SiteLoginConfigResp + err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig) + if err != nil { + return err + } + if !siteConfig.Authn { + return errors.New("authn not support") + } + var prepareLogin PrepareLoginResp + err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin) + if err != nil { + return err + } + if !prepareLogin.PasswordEnabled { + return errors.New("password not enabled") + } + if prepareLogin.WebauthnEnabled { + return errors.New("webauthn not support") + } + for range 5 { + err = d.doLogin(siteConfig.LoginCaptcha) + if err == nil { + break + } + if err.Error() != "CAPTCHA not match." 
{ + break + } + } + return err +} + +func (d *CloudreveV4) doLogin(needCaptcha bool) error { + var err error + loginBody := base.Json{ + "email": d.Username, + "password": d.Password, + } + if needCaptcha { + var config BasicConfigResp + err = d.request(http.MethodGet, "/site/config/basic", nil, &config) + if err != nil { + return err + } + if config.CaptchaType != "normal" { + return fmt.Errorf("captcha type %s not support", config.CaptchaType) + } + var captcha CaptchaResp + err = d.request(http.MethodGet, "/site/captcha", nil, &captcha) + if err != nil { + return err + } + if !strings.HasPrefix(captcha.Image, "data:image/png;base64,") { + return errors.New("can not get captcha") + } + loginBody["ticket"] = captcha.Ticket + i := strings.Index(captcha.Image, ",") + dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(captcha.Image[i+1:])) + vRes, err := base.RestyClient.R().SetMultipartField( + "image", "validateCode.png", "image/png", dec). + Post(setting.GetStr(conf.OcrApi)) + if err != nil { + return err + } + if jsoniter.Get(vRes.Body(), "status").ToInt() != 200 { + return errors.New("ocr error:" + jsoniter.Get(vRes.Body(), "msg").ToString()) + } + captchaCode := jsoniter.Get(vRes.Body(), "result").ToString() + if captchaCode == "" { + return errors.New("ocr error: empty result") + } + loginBody["captcha"] = captchaCode + } + var token TokenResponse + err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) { + req.SetBody(loginBody) + }, &token) + if err != nil { + return err + } + d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken + op.MustSaveDriverStorage(d) + return nil +} + +func (d *CloudreveV4) refreshToken() error { + var token Token + if token.RefreshToken == "" { + if d.Username != "" { + err := d.login() + if err != nil { + return fmt.Errorf("cannot login to get refresh token, error: %s", err) + } + } + return nil + } + err := d.request(http.MethodPost, "/session/token/refresh", func(req 
*resty.Request) { + req.SetBody(base.Json{ + "refresh_token": d.RefreshToken, + }) + }, &token) + if err != nil { + return err + } + d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken + op.MustSaveDriverStorage(d) + return nil +} + +func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error { + var finish int64 = 0 + var chunk int = 0 + DEFAULT := int64(u.ChunkSize) + if DEFAULT == 0 { + // support relay + DEFAULT = file.GetSize() + } + for finish < file.GetSize() { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + left := file.GetSize() - finish + byteSize := min(left, DEFAULT) + utils.Log.Debugf("[CloudreveV4-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize()) + byteData := make([]byte, byteSize) + n, err := io.ReadFull(file, byteData) + utils.Log.Debug(err, n) + if err != nil { + return err + } + err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) { + req.SetHeader("Content-Type", "application/octet-stream") + req.SetContentLength(true) + req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10)) + req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) + req.AddRetryCondition(func(r *resty.Response, err error) bool { + if err != nil { + return true + } + if r.IsError() { + return true + } + var retryResp Resp + jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp) + if jErr != nil { + return true + } + if retryResp.Code != 0 { + return true + } + return false + }) + }, nil) + if err != nil { + return err + } + finish += byteSize + up(float64(finish) * 100 / float64(file.GetSize())) + chunk++ + } + return nil +} + +func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error { + uploadUrl := u.UploadUrls[0] + credential := u.Credential + var finish int64 = 0 + var chunk int = 0 + DEFAULT := 
int64(u.ChunkSize) + retryCount := 0 + maxRetries := 3 + for finish < file.GetSize() { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + left := file.GetSize() - finish + byteSize := min(left, DEFAULT) + utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize()) + byteData := make([]byte, byteSize) + n, err := io.ReadFull(file, byteData) + utils.Log.Debug(err, n) + if err != nil { + return err + } + req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), + driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) + if err != nil { + return err + } + req = req.WithContext(ctx) + req.ContentLength = byteSize + // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) + req.Header.Set("Authorization", fmt.Sprint(credential)) + req.Header.Set("User-Agent", d.getUA()) + err = func() error { + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return errors.New(res.Status) + } + body, err := io.ReadAll(res.Body) + if err != nil { + return err + } + var up Resp + err = json.Unmarshal(body, &up) + if err != nil { + return err + } + if up.Code != 0 { + return errors.New(up.Msg) + } + return nil + }() + if err == nil { + retryCount = 0 + finish += byteSize + up(float64(finish) * 100 / float64(file.GetSize())) + chunk++ + } else { + retryCount++ + if retryCount > maxRetries { + return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err) + } + backoff := time.Duration(1<= 500 && res.StatusCode <= 504: + retryCount++ + if retryCount > maxRetries { + res.Body.Close() + return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) + } + backoff := time.Duration(1< maxRetries { + return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries) + } + backoff := time.Duration(1<") + for i, etag := range etags { + 
bodyBuilder.WriteString(fmt.Sprintf( + `%d%s`, + i+1, // PartNumber 从 1 开始 + etag, + )) + } + bodyBuilder.WriteString("") + req, err := http.NewRequest( + "POST", + u.CompleteURL, + strings.NewReader(bodyBuilder.String()), + ) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/xml") + req.Header.Set("User-Agent", d.getUA()) + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body)) + } + + // 上传成功发送回调请求 + return d.request(http.MethodPost, "/callback/s3/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) { + req.SetBody("{}") + }, nil) +} diff --git a/drivers/onedrive/util.go b/drivers/onedrive/util.go index e256b7ae..28ed5ccc 100644 --- a/drivers/onedrive/util.go +++ b/drivers/onedrive/util.go @@ -8,6 +8,7 @@ import ( "io" "net/http" stdpath "path" + "time" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" @@ -17,7 +18,6 @@ import ( "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" jsoniter "github.com/json-iterator/go" - log "github.com/sirupsen/logrus" ) var onedriveHostMap = map[string]Host{ @@ -204,19 +204,18 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil uploadUrl := jsoniter.Get(res, "uploadUrl").ToString() var finish int64 = 0 DEFAULT := d.ChunkSize * 1024 * 1024 + retryCount := 0 + maxRetries := 3 for finish < stream.GetSize() { if utils.IsCanceled(ctx) { return ctx.Err() } - log.Debugf("upload: %d", finish) - var byteSize int64 = DEFAULT left := stream.GetSize() - finish - if left < DEFAULT { - byteSize = left - } + byteSize := min(left, DEFAULT) + utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()) byteData := make([]byte, byteSize) n, err := io.ReadFull(stream, byteData) - 
log.Debug(err, n) + utils.Log.Debug(err, n) if err != nil { return err } @@ -228,19 +227,31 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil req.ContentLength = byteSize // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())) - finish += byteSize res, err := base.HttpClient.Do(req) if err != nil { return err } // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession - if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 { + switch { + case res.StatusCode >= 500 && res.StatusCode <= 504: + retryCount++ + if retryCount > maxRetries { + res.Body.Close() + return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) + } + backoff := time.Duration(1<= 500 && res.StatusCode <= 504: + retryCount++ + if retryCount > maxRetries { + res.Body.Close() + return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) + } + backoff := time.Duration(1<