diff --git a/.github/workflows/beta_release.yml b/.github/workflows/beta_release.yml index 32073eb9..485942c4 100644 --- a/.github/workflows/beta_release.yml +++ b/.github/workflows/beta_release.yml @@ -8,6 +8,9 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true +permissions: + contents: write + jobs: changelog: strategy: @@ -54,7 +57,7 @@ jobs: strategy: matrix: include: - - target: '!(*musl*|*windows-arm64*|*android*)' # xgo + - target: '!(*musl*|*windows-arm64*|*android*|*freebsd*)' # xgo hash: "md5" - target: 'linux-!(arm*)-musl*' #musl-not-arm hash: "md5-linux-musl" @@ -64,6 +67,9 @@ jobs: hash: "md5-windows-arm64" - target: 'android-*' #android hash: "md5-android" + - target: 'freebsd-*' #freebsd + hash: "md5-freebsd" + name: Beta Release runs-on: ubuntu-latest steps: @@ -81,12 +87,17 @@ jobs: run: bash build.sh dev web - name: Build - id: test-action uses: go-cross/cgo-actions@v1 with: targets: ${{ matrix.target }} musl-target-format: $os-$musl-$arch out-dir: build + x-flags: | + github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at + github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe + github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit + github.com/alist-org/alist/v3/internal/conf.Version=$tag + github.com/alist-org/alist/v3/internal/conf.WebVersion=dev - name: Compress run: | @@ -105,14 +116,23 @@ jobs: name: Beta Release Desktop runs-on: ubuntu-latest steps: - - uses: peter-evans/create-or-update-comment@v4 + - name: Checkout repo + uses: actions/checkout@v4 with: - issue-number: 69 - body: | - /release-beta - - triggered by @${{ github.actor }} - - commit sha: ${{ github.sha }} - - view files: https://github.com/alist-org/alist/tree/${{ github.sha }} - reactions: 'rocket' - token: ${{ secrets.MY_TOKEN }} + repository: alist-org/desktop-release + ref: main + persist-credentials: false + fetch-depth: 0 + + - name: Commit + run: | + git config --local user.email "bot@nn.ci" + git config --local user.name "IlaBot" + git commit --allow-empty -m "Trigger build for ${{ github.sha }}" + + - name: Push commit + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.MY_TOKEN }} + branch: main repository: alist-org/desktop-release \ No newline at end of file diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b059a20b..a2c934e7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,14 +15,17 @@ jobs: strategy: matrix: platform: [ubuntu-latest] - go-version: [ '1.21' ] + target: + - darwin-amd64 + - darwin-arm64 + - windows-amd64 + - linux-arm64-musl + - linux-amd64-musl + - windows-arm64 + - android-arm64 name: Build runs-on: ${{ matrix.platform }} steps: - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: ${{ matrix.go-version }} - name: Checkout uses: actions/checkout@v4 @@ -30,19 +33,29 @@ jobs: - uses: benjlevesque/short-sha@v3.0 id: short-sha - - name: Install dependencies - run: | - sudo snap install zig --classic --beta - docker pull crazymax/xgo:latest - go install github.com/crazy-max/xgo@latest - sudo apt install upx + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '1.22' + + - name: Setup web + run: bash build.sh dev web - name: Build - run: | - bash build.sh dev + uses: go-cross/cgo-actions@v1 + with: + targets: ${{ matrix.target }} + musl-target-format: $os-$musl-$arch + out-dir: build + x-flags: | + github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at + 
github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe + github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit + github.com/alist-org/alist/v3/internal/conf.Version=$tag + github.com/alist-org/alist/v3/internal/conf.WebVersion=dev - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: alist_${{ env.SHA }} - path: dist \ No newline at end of file + name: alist_${{ env.SHA }}_${{ matrix.target }} + path: build/* \ No newline at end of file diff --git a/.github/workflows/build_docker.yml b/.github/workflows/build_docker.yml deleted file mode 100644 index 8f37688d..00000000 --- a/.github/workflows/build_docker.yml +++ /dev/null @@ -1,126 +0,0 @@ -name: build_docker - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -jobs: - build_docker: - name: Build Docker - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: xhofe/alist - tags: | - type=schedule - type=ref,event=branch - type=ref,event=tag - type=ref,event=pr - type=raw,value=beta,enable={{is_default_branch}} - - - name: Docker meta with ffmpeg - id: meta-ffmpeg - uses: docker/metadata-action@v5 - with: - images: xhofe/alist - flavor: | - suffix=-ffmpeg - tags: | - type=schedule - type=ref,event=branch - type=ref,event=tag - type=ref,event=pr - type=raw,value=beta,enable={{is_default_branch}} - - - uses: actions/setup-go@v5 - with: - go-version: 'stable' - - - name: Cache Musl - id: cache-musl - uses: actions/cache@v4 - with: - path: build/musl-libs - key: docker-musl-libs - - - name: Download Musl Library - if: steps.cache-musl.outputs.cache-hit != 'true' - run: bash build.sh prepare docker-multiplatform - - - name: Build go binary - run: bash build.sh dev docker-multiplatform - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to DockerHub - if: github.event_name == 'push' - uses: docker/login-action@v3 - with: - username: xhofe - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build and push - id: docker_build - uses: docker/build-push-action@v6 - with: - context: . - file: Dockerfile.ci - push: ${{ github.event_name == 'push' }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x - - - name: Build and push with ffmpeg - id: docker_build_ffmpeg - uses: docker/build-push-action@v6 - with: - context: . 
- file: Dockerfile.ci - push: ${{ github.event_name == 'push' }} - tags: ${{ steps.meta-ffmpeg.outputs.tags }} - labels: ${{ steps.meta-ffmpeg.outputs.labels }} - build-args: INSTALL_FFMPEG=true - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x - - build_docker_with_aria2: - needs: build_docker - name: Build docker with aria2 - runs-on: ubuntu-latest - if: github.event_name == 'push' - steps: - - name: Checkout repo - uses: actions/checkout@v4 - with: - repository: alist-org/with_aria2 - ref: main - persist-credentials: false - fetch-depth: 0 - - - name: Commit - run: | - git config --local user.email "bot@nn.ci" - git config --local user.name "IlaBot" - git commit --allow-empty -m "Trigger build for ${{ github.sha }}" - - - name: Push commit - uses: ad-m/github-push-action@master - with: - github_token: ${{ secrets.MY_TOKEN }} - branch: main - repository: alist-org/with_aria2 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6ef38566..1d42019a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,6 +13,23 @@ jobs: name: Release runs-on: ${{ matrix.platform }} steps: + + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + # this might remove tools that are actually needed, + # if set to "true" but frees about 6 GB + tool-cache: false + + # all of these default to true, but feel free to set to + # "false" if necessary for your workflow + android: true + dotnet: true + haskell: true + large-packages: true + docker-images: true + swap-storage: true + - name: Prerelease uses: irongut/EditRelease@v1.2.0 with: diff --git a/.github/workflows/release_docker.yml b/.github/workflows/release_docker.yml index 95a686b2..7cd05549 100644 --- a/.github/workflows/release_docker.yml +++ b/.github/workflows/release_docker.yml @@ -4,10 +4,34 @@ on: push: tags: - 'v*' + branches: + - main + pull_request: + branches: + - main + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + REGISTRY: 'xhofe/alist' + REGISTRY_USERNAME: 'xhofe' + REGISTRY_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }} + ARTIFACT_NAME: 'binaries_docker_release' + RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64' + IMAGE_PUSH: ${{ github.event_name == 'push' }} + IMAGE_IS_PROD: ${{ github.ref_type == 'tag' }} + IMAGE_TAGS_BETA: | + type=schedule + type=ref,event=branch + type=ref,event=tag + type=ref,event=pr + type=raw,value=beta,enable={{is_default_branch}} jobs: - release_docker: - name: Release Docker + build_binary: + name: Build Binaries for Docker Release runs-on: ubuntu-latest steps: - name: Checkout @@ -22,20 +46,59 @@ jobs: uses: actions/cache@v4 with: path: build/musl-libs - key: docker-musl-libs + key: docker-musl-libs-v2 - name: Download Musl Library if: steps.cache-musl.outputs.cache-hit != 'true' run: bash build.sh prepare docker-multiplatform - - name: Build go binary + - name: Build go binary (beta) + if: env.IMAGE_IS_PROD != 'true' + run: bash build.sh beta docker-multiplatform + + - name: Build go binary (release) + if: env.IMAGE_IS_PROD == 'true' run: bash build.sh release docker-multiplatform - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 + - name: Upload artifacts + uses: actions/upload-artifact@v4 with: - images: xhofe/alist + name: ${{ env.ARTIFACT_NAME }} + overwrite: true + path: | + build/ + !build/*.tgz + 
!build/musl-libs/** + + release_docker: + needs: build_binary + name: Release Docker image + runs-on: ubuntu-latest + strategy: + matrix: + image: ["latest", "ffmpeg", "aria2", "aio"] + include: + - image: "latest" + build_arg: "" + tag_favor: "" + - image: "ffmpeg" + build_arg: INSTALL_FFMPEG=true + tag_favor: "suffix=-ffmpeg,onlatest=true" + - image: "aria2" + build_arg: INSTALL_ARIA2=true + tag_favor: "suffix=-aria2,onlatest=true" + - image: "aio" + build_arg: | + INSTALL_FFMPEG=true + INSTALL_ARIA2=true + tag_favor: "suffix=-aio,onlatest=true" + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: actions/download-artifact@v4 + with: + name: ${{ env.ARTIFACT_NAME }} + path: 'build/' - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -44,10 +107,22 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Login to DockerHub + if: env.IMAGE_PUSH == 'true' uses: docker/login-action@v3 with: - username: xhofe - password: ${{ secrets.DOCKERHUB_TOKEN }} + logout: true + username: ${{ env.REGISTRY_USERNAME }} + password: ${{ env.REGISTRY_PASSWORD }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }} + tags: ${{ env.IMAGE_IS_PROD == 'true' && '' || env.IMAGE_TAGS_BETA }} + flavor: | + ${{ env.IMAGE_IS_PROD == 'true' && 'latest=true' || '' }} + ${{ matrix.tag_favor }} - name: Build and push id: docker_build @@ -55,54 +130,8 @@ jobs: with: context: . file: Dockerfile.ci - push: true + push: ${{ env.IMAGE_PUSH == 'true' }} + build-args: ${{ matrix.build_arg }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x - - - name: Docker meta with ffmpeg - id: meta-ffmpeg - uses: docker/metadata-action@v5 - with: - images: xhofe/alist - flavor: | - latest=true - suffix=-ffmpeg,onlatest=true - - - name: Build and push with ffmpeg - id: docker_build_ffmpeg - uses: docker/build-push-action@v6 - with: - context: . 
- file: Dockerfile.ci - push: true - tags: ${{ steps.meta-ffmpeg.outputs.tags }} - labels: ${{ steps.meta-ffmpeg.outputs.labels }} - build-args: INSTALL_FFMPEG=true - platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x - - release_docker_with_aria2: - needs: release_docker - name: Release docker with aria2 - runs-on: ubuntu-latest - steps: - - name: Checkout repo - uses: actions/checkout@v4 - with: - repository: alist-org/with_aria2 - ref: main - persist-credentials: false - fetch-depth: 0 - - - name: Add tag - run: | - git config --local user.email "bot@nn.ci" - git config --local user.name "IlaBot" - git tag -a ${{ github.ref_name }} -m "release ${{ github.ref_name }}" - - - name: Push tags - uses: ad-m/github-push-action@master - with: - github_token: ${{ secrets.MY_TOKEN }} - branch: main - repository: alist-org/with_aria2 + platforms: ${{ env.RELEASE_PLATFORMS }} \ No newline at end of file diff --git a/.github/workflows/release_freebsd.yml b/.github/workflows/release_freebsd.yml new file mode 100644 index 00000000..70dcecb1 --- /dev/null +++ b/.github/workflows/release_freebsd.yml @@ -0,0 +1,34 @@ +name: release_freebsd + +on: + release: + types: [ published ] + +jobs: + release_freebsd: + strategy: + matrix: + platform: [ ubuntu-latest ] + go-version: [ '1.21' ] + name: Release + runs-on: ${{ matrix.platform }} + steps: + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Build + run: | + bash build.sh release freebsd + + - name: Upload assets + uses: softprops/action-gh-release@v2 + with: + files: build/compress/* diff --git a/Dockerfile b/Dockerfile index 74fa2165..f5e91bee 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,7 @@ RUN bash build.sh release docker FROM alpine:edge ARG INSTALL_FFMPEG=false +ARG INSTALL_ARIA2=false LABEL MAINTAINER="i@nn.ci" WORKDIR /opt/alist/ @@ -18,13 +19,24 @@ RUN apk update && \ apk upgrade --no-cache && \ apk add --no-cache bash ca-certificates su-exec tzdata; \ [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \ + [ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \ + mkdir -p /opt/aria2/.aria2 && \ + wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \ + tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \ + sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \ + sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \ + touch /opt/aria2/.aria2/aria2.session && \ + /opt/aria2/.aria2/tracker.sh ; \ rm -rf /var/cache/apk/* -COPY --from=builder /app/bin/alist ./ -COPY entrypoint.sh /entrypoint.sh -RUN chmod +x /entrypoint.sh && /entrypoint.sh version +COPY --chmod=755 --from=builder /app/bin/alist ./ +COPY --chmod=755 entrypoint.sh /entrypoint.sh +RUN /entrypoint.sh version -ENV PUID=0 PGID=0 UMASK=022 +ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2} VOLUME /opt/alist/data/ EXPOSE 5244 5245 CMD [ "/entrypoint.sh" ] \ No newline at end of file diff --git a/Dockerfile.ci b/Dockerfile.ci index 3f437f16..a17aae9f 100644 --- a/Dockerfile.ci +++ b/Dockerfile.ci @@ -2,6 +2,7 @@ FROM alpine:edge ARG 
TARGETPLATFORM ARG INSTALL_FFMPEG=false +ARG INSTALL_ARIA2=false LABEL MAINTAINER="i@nn.ci" WORKDIR /opt/alist/ @@ -10,13 +11,24 @@ RUN apk update && \ apk upgrade --no-cache && \ apk add --no-cache bash ca-certificates su-exec tzdata; \ [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \ + [ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \ + mkdir -p /opt/aria2/.aria2 && \ + wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \ + tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \ + sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \ + sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \ + sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \ + touch /opt/aria2/.aria2/aria2.session && \ + /opt/aria2/.aria2/tracker.sh ; \ rm -rf /var/cache/apk/* -COPY /build/${TARGETPLATFORM}/alist ./ -COPY entrypoint.sh /entrypoint.sh -RUN chmod +x /entrypoint.sh && /entrypoint.sh version +COPY --chmod=755 /build/${TARGETPLATFORM}/alist ./ +COPY --chmod=755 entrypoint.sh /entrypoint.sh +RUN /entrypoint.sh version -ENV PUID=0 PGID=0 UMASK=022 +ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2} VOLUME /opt/alist/data/ EXPOSE 5244 5245 CMD [ "/entrypoint.sh" ] \ No newline at end of file diff --git a/README.md b/README.md index bed2eadf..1261839e 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ --- -English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md) +English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md) ## Features @@ -58,7 +58,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing] - [x] WebDav(Support OneDrive/SharePoint without API) - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ )) - [x] [Mediatrack](https://www.mediatrack.cn/) - - [x] [139yun](https://yun.139.com/) (Personal, Family) + - [x] [139yun](https://yun.139.com/) (Personal, Family, Group) - [x] [YandexDisk](https://disk.yandex.com/) - [x] [BaiduNetdisk](http://pan.baidu.com/) - [x] [Terabox](https://www.terabox.com/main) @@ -77,6 +77,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing] - [x] [Dropbox](https://www.dropbox.com/) - [x] [FeijiPan](https://www.feijipan.com/) - [x] [dogecloud](https://www.dogecloud.com/product/oss) + - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs) - [x] Easy to deploy and out-of-the-box - [x] File preview (PDF, markdown, code, plain text, ...) - [x] Image preview in gallery mode @@ -98,7 +99,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing] ## Document - + ## Demo @@ -138,4 +139,4 @@ The `AList` is open-source software licensed under the AGPL-3.0 license. 
--- -> [@Blog](https://nn.ci/) · [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2) +> [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2) diff --git a/README_cn.md b/README_cn.md index 7e45d60f..5c71ccce 100644 --- a/README_cn.md +++ b/README_cn.md @@ -58,7 +58,7 @@ - [x] WebDav(支持无API的OneDrive/SharePoint) - [x] Teambition([中国](https://www.teambition.com/ ),[国际](https://us.teambition.com/ )) - [x] [分秒帧](https://www.mediatrack.cn/) - - [x] [和彩云](https://yun.139.com/) (个人云, 家庭云) + - [x] [和彩云](https://yun.139.com/) (个人云, 家庭云,共享群组) - [x] [Yandex.Disk](https://disk.yandex.com/) - [x] [百度网盘](http://pan.baidu.com/) - [x] [UC网盘](https://drive.uc.cn) diff --git a/README_ja.md b/README_ja.md index 453e7b99..cd4446fa 100644 --- a/README_ja.md +++ b/README_ja.md @@ -58,7 +58,7 @@ - [x] WebDav(Support OneDrive/SharePoint without API) - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ )) - [x] [Mediatrack](https://www.mediatrack.cn/) - - [x] [139yun](https://yun.139.com/) (Personal, Family) + - [x] [139yun](https://yun.139.com/) (Personal, Family, Group) - [x] [YandexDisk](https://disk.yandex.com/) - [x] [BaiduNetdisk](http://pan.baidu.com/) - [x] [Terabox](https://www.terabox.com/main) diff --git a/build.sh b/build.sh index 18a30e63..2dee8e20 100644 --- a/build.sh +++ b/build.sh @@ -1,12 +1,14 @@ appName="alist" builtAt="$(date +'%F %T %z')" -goVersion=$(go version | sed 's/go version //') gitAuthor="Xhofe " gitCommit=$(git log --pretty=format:"%h" -1) if [ "$1" = "dev" ]; then version="dev" webVersion="dev" +elif [ "$1" = "beta" ]; then + version="beta" + webVersion="dev" else git tag -d beta version=$(git describe --abbrev=0 --tags) @@ -19,7 +21,6 @@ echo "frontend version: $webVersion" ldflags="\ -w -s \ -X 'github.com/alist-org/alist/v3/internal/conf.BuiltAt=$builtAt' \ --X 'github.com/alist-org/alist/v3/internal/conf.GoVersion=$goVersion' \ -X 'github.com/alist-org/alist/v3/internal/conf.GitAuthor=$gitAuthor' \ -X 'github.com/alist-org/alist/v3/internal/conf.GitCommit=$gitCommit' \ -X 'github.com/alist-org/alist/v3/internal/conf.Version=$version' \ @@ -93,7 +94,7 @@ BuildDocker() { PrepareBuildDockerMusl() { mkdir -p build/musl-libs BASE="https://musl.cc/" - FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross) + FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross) for i in "${FILES[@]}"; do url="${BASE}${i}.tgz" lib_tgz="build/${i}.tgz" @@ -112,8 +113,8 @@ BuildDockerMultiplatform() { docker_lflags="--extldflags '-static -fpic' $ldflags" export CGO_ENABLED=1 - OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x) - CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc) + OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le) + CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc) for i in "${!OS_ARCHES[@]}"; do os_arch=${OS_ARCHES[$i]} cgo_cc=${CGO_ARGS[$i]} @@ -233,6 +234,29 @@ BuildReleaseAndroid() { done } +BuildReleaseFreeBSD() { + rm -rf .git/ + mkdir -p 
"build/freebsd" + OS_ARCHES=(amd64 arm64 i386) + GO_ARCHES=(amd64 arm64 386) + CGO_ARGS=(x86_64-unknown-freebsd14.1 aarch64-unknown-freebsd14.1 i386-unknown-freebsd14.1) + for i in "${!OS_ARCHES[@]}"; do + os_arch=${OS_ARCHES[$i]} + cgo_cc="clang --target=${CGO_ARGS[$i]} --sysroot=/opt/freebsd/${os_arch}" + echo building for freebsd-${os_arch} + sudo mkdir -p "/opt/freebsd/${os_arch}" + wget -q https://download.freebsd.org/releases/${os_arch}/14.1-RELEASE/base.txz + sudo tar -xf ./base.txz -C /opt/freebsd/${os_arch} + rm base.txz + export GOOS=freebsd + export GOARCH=${GO_ARCHES[$i]} + export CC=${cgo_cc} + export CGO_ENABLED=1 + export CGO_LDFLAGS="-fuse-ld=lld" + go build -o ./build/$appName-freebsd-$os_arch -ldflags="$ldflags" -tags=jsoniter . + done +} + MakeRelease() { cd build mkdir compress @@ -251,6 +275,11 @@ MakeRelease() { tar -czvf compress/"$i".tar.gz alist rm -f alist done + for i in $(find . -type f -name "$appName-freebsd-*"); do + cp "$i" alist + tar -czvf compress/"$i".tar.gz alist + rm -f alist + done for i in $(find . -type f -name "$appName-windows-*"); do cp "$i" alist.exe zip compress/$(echo $i | sed 's/\.[^.]*$//').zip alist.exe @@ -273,8 +302,12 @@ if [ "$1" = "dev" ]; then else BuildDev fi -elif [ "$1" = "release" ]; then - FetchWebRelease +elif [ "$1" = "release" -o "$1" = "beta" ]; then + if [ "$1" = "beta" ]; then + FetchWebDev + else + FetchWebRelease + fi if [ "$2" = "docker" ]; then BuildDocker elif [ "$2" = "docker-multiplatform" ]; then @@ -288,6 +321,9 @@ elif [ "$1" = "release" ]; then elif [ "$2" = "android" ]; then BuildReleaseAndroid MakeRelease "md5-android.txt" + elif [ "$2" = "freebsd" ]; then + BuildReleaseFreeBSD + MakeRelease "md5-freebsd.txt" elif [ "$2" = "web" ]; then echo "web only" else diff --git a/cmd/common.go b/cmd/common.go index b4a7081c..8a73f9b0 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -17,7 +17,9 @@ func Init() { bootstrap.Log() bootstrap.InitDB() data.InitData() + bootstrap.InitStreamLimit() bootstrap.InitIndex() + bootstrap.InitUpgradePatch() } func Release() { diff --git a/cmd/kill.go b/cmd/kill.go new file mode 100644 index 00000000..3378fd70 --- /dev/null +++ b/cmd/kill.go @@ -0,0 +1,54 @@ +package cmd + +import ( + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "os" +) + +// KillCmd represents the kill command +var KillCmd = &cobra.Command{ + Use: "kill", + Short: "Force kill alist server process by daemon/pid file", + Run: func(cmd *cobra.Command, args []string) { + kill() + }, +} + +func kill() { + initDaemon() + if pid == -1 { + log.Info("Seems not have been started. Try use `alist start` to start server.") + return + } + process, err := os.FindProcess(pid) + if err != nil { + log.Errorf("failed to find process by pid: %d, reason: %v", pid, process) + return + } + err = process.Kill() + if err != nil { + log.Errorf("failed to kill process %d: %v", pid, err) + } else { + log.Info("killed process: ", pid) + } + err = os.Remove(pidFile) + if err != nil { + log.Errorf("failed to remove pid file") + } + pid = -1 +} + +func init() { + RootCmd.AddCommand(KillCmd) + + // Here you will define your flags and configuration settings. 
+ + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // stopCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // stopCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} diff --git a/cmd/lang.go b/cmd/lang.go index 56ef037b..5d8ce837 100644 --- a/cmd/lang.go +++ b/cmd/lang.go @@ -12,6 +12,7 @@ import ( "strings" _ "github.com/alist-org/alist/v3/drivers" + "github.com/alist-org/alist/v3/internal/bootstrap" "github.com/alist-org/alist/v3/internal/bootstrap/data" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/op" @@ -137,6 +138,7 @@ var LangCmd = &cobra.Command{ Use: "lang", Short: "Generate language json file", Run: func(cmd *cobra.Command, args []string) { + bootstrap.InitConfig() err := os.MkdirAll("lang", 0777) if err != nil { utils.Log.Fatalf("failed create folder: %s", err.Error()) diff --git a/cmd/root.go b/cmd/root.go index 6bd82b7a..59eb989c 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -6,6 +6,7 @@ import ( "github.com/alist-org/alist/v3/cmd/flags" _ "github.com/alist-org/alist/v3/drivers" + _ "github.com/alist-org/alist/v3/internal/archive" _ "github.com/alist-org/alist/v3/internal/offline_download" "github.com/spf13/cobra" ) diff --git a/cmd/server.go b/cmd/server.go index 8a7beafa..4263f020 100644 --- a/cmd/server.go +++ b/cmd/server.go @@ -13,14 +13,19 @@ import ( "syscall" "time" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "github.com/KirCute/sftpd-alist" "github.com/alist-org/alist/v3/cmd/flags" "github.com/alist-org/alist/v3/internal/bootstrap" "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server" "github.com/gin-gonic/gin" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" ) // ServerCmd represents the server command @@ -44,11 +49,15 @@ the address is defined in config file`, r := gin.New() r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out)) server.Init(r) + var httpHandler http.Handler = r + if conf.Conf.Scheme.EnableH2c { + httpHandler = h2c.NewHandler(r, &http2.Server{}) + } var httpSrv, httpsSrv, unixSrv *http.Server if conf.Conf.Scheme.HttpPort != -1 { httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort) utils.Log.Infof("start HTTP server @ %s", httpBase) - httpSrv = &http.Server{Addr: httpBase, Handler: r} + httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler} go func() { err := httpSrv.ListenAndServe() if err != nil && !errors.Is(err, http.ErrServerClosed) { @@ -69,7 +78,7 @@ the address is defined in config file`, } if conf.Conf.Scheme.UnixFile != "" { utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile) - unixSrv = &http.Server{Handler: r} + unixSrv = &http.Server{Handler: httpHandler} go func() { listener, err := net.Listen("unix", conf.Conf.Scheme.UnixFile) if err != nil { @@ -112,6 +121,42 @@ the address is defined in config file`, } }() } + var ftpDriver *server.FtpMainDriver + var ftpServer *ftpserver.FtpServer + if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable { + var err error + ftpDriver, err = server.NewMainDriver() + if err != nil { + utils.Log.Fatalf("failed to start ftp driver: %s", err.Error()) + } else { + 
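// the FTP driver is ready: log the listen address and serve on a background goroutine so startup can continue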
utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen) + go func() { + ftpServer = ftpserver.NewFtpServer(ftpDriver) + err = ftpServer.ListenAndServe() + if err != nil { + utils.Log.Fatalf("problem ftp server listening: %s", err.Error()) + } + }() + } + } + var sftpDriver *server.SftpDriver + var sftpServer *sftpd.SftpServer + if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable { + var err error + sftpDriver, err = server.NewSftpDriver() + if err != nil { + utils.Log.Fatalf("failed to start sftp driver: %s", err.Error()) + } else { + utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen) + go func() { + sftpServer = sftpd.NewSftpServer(sftpDriver) + err = sftpServer.RunServer() + if err != nil { + utils.Log.Fatalf("problem sftp server listening: %s", err.Error()) + } + }() + } + } // Wait for interrupt signal to gracefully shutdown the server with // a timeout of 1 second. quit := make(chan os.Signal, 1) @@ -121,6 +166,7 @@ the address is defined in config file`, signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) <-quit utils.Log.Println("Shutdown server...") + fs.ArchiveContentUploadTaskManager.RemoveAll() Release() ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() @@ -152,6 +198,25 @@ the address is defined in config file`, } }() } + if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable && ftpServer != nil && ftpDriver != nil { + wg.Add(1) + go func() { + defer wg.Done() + ftpDriver.Stop() + if err := ftpServer.Stop(); err != nil { + utils.Log.Fatal("FTP server shutdown err: ", err) + } + }() + } + if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable && sftpServer != nil && sftpDriver != nil { + wg.Add(1) + go func() { + defer wg.Done() + if err := sftpServer.Close(); err != nil { + utils.Log.Fatal("SFTP server shutdown err: ", err) + } + }() + } wg.Wait() utils.Log.Println("Server exit") }, diff --git a/cmd/stop.go b/cmd/stop_default.go similarity index 87% rename from cmd/stop.go rename to cmd/stop_default.go index 09fba7b7..8f133940 100644 --- a/cmd/stop.go +++ b/cmd/stop_default.go @@ -1,10 +1,10 @@ -/* -Copyright © 2022 NAME HERE -*/ +//go:build !windows + package cmd import ( "os" + "syscall" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -30,11 +30,11 @@ func stop() { log.Errorf("failed to find process by pid: %d, reason: %v", pid, process) return } - err = process.Kill() + err = process.Signal(syscall.SIGTERM) if err != nil { - log.Errorf("failed to kill process %d: %v", pid, err) + log.Errorf("failed to terminate process %d: %v", pid, err) } else { - log.Info("killed process: ", pid) + log.Info("terminated process: ", pid) } err = os.Remove(pidFile) if err != nil { diff --git a/cmd/stop_windows.go b/cmd/stop_windows.go new file mode 100644 index 00000000..e086eab1 --- /dev/null +++ b/cmd/stop_windows.go @@ -0,0 +1,34 @@ +//go:build windows + +package cmd + +import ( + "github.com/spf13/cobra" +) + +// StopCmd represents the stop command +var StopCmd = &cobra.Command{ + Use: "stop", + Short: "Same as the kill command", + Run: func(cmd *cobra.Command, args []string) { + stop() + }, +} + +func stop() { + kill() +} + +func init() { + RootCmd.AddCommand(StopCmd) + + // Here you will define your flags and configuration settings. 
+ + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // stopCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // stopCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} diff --git a/cmd/version.go b/cmd/version.go index cdf4d71f..a758816e 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -6,6 +6,7 @@ package cmd import ( "fmt" "os" + "runtime" "github.com/alist-org/alist/v3/internal/conf" "github.com/spf13/cobra" @@ -16,14 +17,15 @@ var VersionCmd = &cobra.Command{ Use: "version", Short: "Show current version of AList", Run: func(cmd *cobra.Command, args []string) { + goVersion := fmt.Sprintf("%s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Printf(`Built At: %s Go Version: %s Author: %s Commit ID: %s Version: %s WebVersion: %s -`, - conf.BuiltAt, conf.GoVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion) +`, conf.BuiltAt, goVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion) os.Exit(0) }, } diff --git a/drivers/115/appver.go b/drivers/115/appver.go new file mode 100644 index 00000000..78e11a54 --- /dev/null +++ b/drivers/115/appver.go @@ -0,0 +1,43 @@ +package _115 + +import ( + driver115 "github.com/SheltonZhu/115driver/pkg/driver" + "github.com/alist-org/alist/v3/drivers/base" + log "github.com/sirupsen/logrus" +) + +var ( + md5Salt = "Qclm8MGWUv59TnrR0XPg" + appVer = "27.0.5.7" +) + +func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) { + result := driver115.VersionResp{} + resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion) + + err = driver115.CheckErr(err, &result, resp) + if err != nil { + return nil, err + } + + return result.Data.GetAppVersions(), nil +} + +func (d *Pan115) getAppVer() string { + // todo add some cache? 
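+	// best-effort: ask the 115 version API for the current client version, falling back to the hardcoded appVer on any failure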
+ vers, err := d.getAppVersion() + if err != nil { + log.Warnf("[115] get app version failed: %v", err) + return appVer + } + for _, ver := range vers { + if ver.AppName == "win" { + return ver.Version + } + } + return appVer +} + +func (d *Pan115) initAppVer() { + appVer = d.getAppVer() +} diff --git a/drivers/115/driver.go b/drivers/115/driver.go index 2a1c8dee..0dcb64d8 100644 --- a/drivers/115/driver.go +++ b/drivers/115/driver.go @@ -3,6 +3,7 @@ package _115 import ( "context" "strings" + "sync" driver115 "github.com/SheltonZhu/115driver/pkg/driver" "github.com/alist-org/alist/v3/internal/driver" @@ -16,8 +17,9 @@ import ( type Pan115 struct { model.Storage Addition - client *driver115.Pan115Client - limiter *rate.Limiter + client *driver115.Pan115Client + limiter *rate.Limiter + appVerOnce sync.Once } func (d *Pan115) Config() driver.Config { @@ -29,6 +31,7 @@ func (d *Pan115) GetAddition() driver.Additional { } func (d *Pan115) Init(ctx context.Context) error { + d.appVerOnce.Do(d.initAppVer) if d.LimitRate > 0 { d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1) } @@ -76,28 +79,60 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) return link, nil } -func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { +func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { if err := d.WaitLimit(ctx); err != nil { - return err + return nil, err } - if _, err := d.client.Mkdir(parentDir.GetID(), dirName); err != nil { - return err + + result := driver115.MkdirResp{} + form := map[string]string{ + "pid": parentDir.GetID(), + "cname": dirName, } - return nil + req := d.client.NewRequest(). + SetFormData(form). + SetResult(&result). + ForceContentType("application/json;charset=UTF-8") + + resp, err := req.Post(driver115.ApiDirAdd) + + err = driver115.CheckErr(err, &result, resp) + if err != nil { + return nil, err + } + f, err := d.getNewFile(result.FileID) + if err != nil { + return nil, nil + } + return f, nil } -func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) error { +func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { if err := d.WaitLimit(ctx); err != nil { - return err + return nil, err } - return d.client.Move(dstDir.GetID(), srcObj.GetID()) + if err := d.client.Move(dstDir.GetID(), srcObj.GetID()); err != nil { + return nil, err + } + f, err := d.getNewFile(srcObj.GetID()) + if err != nil { + return nil, nil + } + return f, nil } -func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) error { +func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { if err := d.WaitLimit(ctx); err != nil { - return err + return nil, err } - return d.client.Rename(srcObj.GetID(), newName) + if err := d.client.Rename(srcObj.GetID(), newName); err != nil { + return nil, err + } + f, err := d.getNewFile((srcObj.GetID())) + if err != nil { + return nil, nil + } + return f, nil } func (d *Pan115) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { @@ -114,9 +149,9 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error { return d.client.Delete(obj.GetID()) } -func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { if err := d.WaitLimit(ctx); err != nil { 
-		return err
+		return nil, err
 	}
 
 	var (
@@ -125,10 +160,10 @@
 	)
 
 	if ok, err := d.client.UploadAvailable(); err != nil || !ok {
-		return err
+		return nil, err
 	}
 	if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
-		return driver115.ErrUploadTooLarge
+		return nil, driver115.ErrUploadTooLarge
 	}
 	//if digest, err = d.client.GetDigestResult(stream); err != nil {
 	//	return err
 	//}
 	}
 	reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
 	if err != nil {
-		return err
+		return nil, err
 	}
 	preHash, err := utils.HashReader(utils.SHA1, reader)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	preHash = strings.ToUpper(preHash)
 	fullHash := stream.GetHash().GetHash(utils.SHA1)
 	if len(fullHash) <= 0 {
 		tmpF, err := stream.CacheFullInTempFile()
 		if err != nil {
-			return err
+			return nil, err
 		}
 		fullHash, err = utils.HashFile(utils.SHA1, tmpF)
 		if err != nil {
-			return err
+			return nil, err
 		}
 	}
 	fullHash = strings.ToUpper(fullHash)
@@ -165,20 +200,36 @@
 	// note that 115 adds a timeout for rapid-upload,
 	// and a "sig invalid" err is thrown even when the hash is correct after the timeout.
 	if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
-		return err
+		return nil, err
 	}
 	if matched, err := fastInfo.Ok(); err != nil {
-		return err
+		return nil, err
 	} else if matched {
-		return nil
+		f, err := d.getNewFileByPickCode(fastInfo.PickCode)
+		if err != nil {
+			return nil, nil
+		}
+		return f, nil
 	}
+	var uploadResult *UploadResult
 	// rapid upload (闪传) failed, fall back to a real upload
-	if stream.GetSize() <= utils.KB { // files under 1KB are uploaded in normal mode
-		return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
+	if stream.GetSize() <= 10*utils.MB { // files up to 10MB use the normal (single-request) upload
+		if uploadResult, err = d.UploadByOSS(ctx, &fastInfo.UploadOSSParams, stream, dirID, up); err != nil {
+			return nil, err
+		}
+	} else {
+		// multipart upload
+		if uploadResult, err = d.UploadByMultipart(ctx, &fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID, up); err != nil {
+			return nil, err
+		}
 	}
-	// multipart upload
-	return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
+
+	file, err := d.getNewFile(uploadResult.Data.FileID)
+	if err != nil {
+		return nil, nil
+	}
+	return file, nil
 }
 
 func (d *Pan115) OfflineList(ctx context.Context) ([]*driver115.OfflineTask, error) {
@@ -190,7 +241,7 @@
 }
 
 func (d *Pan115) OfflineDownload(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) {
-	return d.client.AddOfflineTaskURIs(uris, dstDir.GetID())
+	return d.client.AddOfflineTaskURIs(uris, dstDir.GetID(), driver115.WithAppVer(appVer))
 }
 
 func (d *Pan115) DeleteOfflineTasks(ctx context.Context, hashes []string, deleteFiles bool) error {
diff --git a/drivers/115/meta.go b/drivers/115/meta.go
index 38c1742a..bcea1749 100644
--- a/drivers/115/meta.go
+++ b/drivers/115/meta.go
@@ -9,8 +9,8 @@ type Addition struct {
 	Cookie       string `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
 	QRCodeToken  string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
 	QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code 
device, default linux"` - PageSize int64 `json:"page_size" type:"number" default:"56" help:"list api per page size of 115 driver"` - LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"` + PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"` + LimitRate float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"` driver.RootID } diff --git a/drivers/115/types.go b/drivers/115/types.go index 830e347b..40b951d8 100644 --- a/drivers/115/types.go +++ b/drivers/115/types.go @@ -1,10 +1,11 @@ package _115 import ( + "time" + "github.com/SheltonZhu/115driver/pkg/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" - "time" ) var _ model.Obj = (*FileObj)(nil) @@ -20,3 +21,18 @@ func (f *FileObj) CreateTime() time.Time { func (f *FileObj) GetHash() utils.HashInfo { return utils.NewHashInfo(utils.SHA1, f.Sha1) } + +type UploadResult struct { + driver.BasicResp + Data struct { + PickCode string `json:"pick_code"` + FileSize int `json:"file_size"` + FileID string `json:"file_id"` + ThumbURL string `json:"thumb_url"` + Sha1 string `json:"sha1"` + Aid int `json:"aid"` + FileName string `json:"file_name"` + Cid string `json:"cid"` + IsVideo int `json:"is_video"` + } `json:"data"` +} diff --git a/drivers/115/util.go b/drivers/115/util.go index 992502c4..fc17fe3c 100644 --- a/drivers/115/util.go +++ b/drivers/115/util.go @@ -2,36 +2,39 @@ package _115 import ( "bytes" + "context" + "crypto/md5" "crypto/tls" + "encoding/hex" "encoding/json" "fmt" "io" "net/http" "net/url" - "path/filepath" "strconv" "strings" "sync" + "sync/atomic" "time" "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" "github.com/aliyun/aliyun-oss-go-sdk/oss" + cipher "github.com/SheltonZhu/115driver/pkg/crypto/ec115" + crypto "github.com/SheltonZhu/115driver/pkg/crypto/m115" driver115 "github.com/SheltonZhu/115driver/pkg/driver" - crypto "github.com/gaoyb7/115drive-webdav/115" - "github.com/orzogc/fake115uploader/cipher" "github.com/pkg/errors" ) -var UserAgent = driver115.UA115Browser - +// var UserAgent = driver115.UA115Browser func (d *Pan115) login() error { var err error opts := []driver115.Option{ - driver115.UA(UserAgent), + driver115.UA(d.getUA()), func(c *driver115.Pan115Client) { c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify}) }, @@ -45,7 +48,7 @@ func (d *Pan115) login() error { if cr, err = d.client.QRCodeLoginWithApp(s, driver115.LoginApp(d.QRCodeSource)); err != nil { return errors.Wrap(err, "failed to login by qrcode") } - d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s", cr.UID, cr.CID, cr.SEID) + d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s;KID=%s", cr.UID, cr.CID, cr.SEID, cr.KID) d.QRCodeToken = "" } else if d.Cookie != "" { if err = cr.FromCookie(d.Cookie); err != nil { @@ -63,7 +66,7 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) { if d.PageSize <= 0 { d.PageSize = driver115.FileListLimit } - files, err := d.client.ListWithLimit(fileId, d.PageSize) + files, err := d.client.ListWithLimit(fileId, d.PageSize, driver115.WithMultiUrls()) if err != nil { return nil, err } @@ -73,14 +76,42 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) { return res, nil } -const ( - 
appVer = "2.0.3.6" -) +func (d *Pan115) getNewFile(fileId string) (*FileObj, error) { + file, err := d.client.GetFile(fileId) + if err != nil { + return nil, err + } + return &FileObj{*file}, nil +} -func (c *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) { +func (d *Pan115) getNewFileByPickCode(pickCode string) (*FileObj, error) { + result := driver115.GetFileInfoResponse{} + req := d.client.NewRequest(). + SetQueryParam("pick_code", pickCode). + ForceContentType("application/json;charset=UTF-8"). + SetResult(&result) + resp, err := req.Get(driver115.ApiFileInfo) + if err := driver115.CheckErr(err, &result, resp); err != nil { + return nil, err + } + if len(result.Files) == 0 { + return nil, errors.New("not get file info") + } + fileInfo := result.Files[0] + + f := &FileObj{} + f.From(fileInfo) + return f, nil +} + +func (d *Pan115) getUA() string { + return fmt.Sprintf("Mozilla/5.0 115Browser/%s", appVer) +} + +func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) { key := crypto.GenerateKey() result := driver115.DownloadResp{} - params, err := utils.Json.Marshal(map[string]string{"pickcode": pickCode}) + params, err := utils.Json.Marshal(map[string]string{"pick_code": pickCode}) if err != nil { return nil, err } @@ -88,13 +119,13 @@ func (c *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e data := crypto.Encode(params, key) bodyReader := strings.NewReader(url.Values{"data": []string{data}}.Encode()) - reqUrl := fmt.Sprintf("%s?t=%s", driver115.ApiDownloadGetUrl, driver115.Now().String()) + reqUrl := fmt.Sprintf("%s?t=%s", driver115.AndroidApiDownloadGetUrl, driver115.Now().String()) req, _ := http.NewRequest(http.MethodPost, reqUrl, bodyReader) req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - req.Header.Set("Cookie", c.Cookie) + req.Header.Set("Cookie", d.Cookie) req.Header.Set("User-Agent", ua) - resp, err := c.client.Client.GetClient().Do(req) + resp, err := d.client.Client.GetClient().Do(req) if err != nil { return nil, err } @@ -112,24 +143,30 @@ func (c *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e return nil, err } - bytes, err := crypto.Decode(string(result.EncodedData), key) + b, err := crypto.Decode(string(result.EncodedData), key) if err != nil { return nil, err } - downloadInfo := driver115.DownloadData{} - if err := utils.Json.Unmarshal(bytes, &downloadInfo); err != nil { + downloadInfo := struct { + Url string `json:"url"` + }{} + if err := utils.Json.Unmarshal(b, &downloadInfo); err != nil { return nil, err } - for _, info := range downloadInfo { - if info.FileSize < 0 { - return nil, driver115.ErrDownloadEmpty - } - info.Header = resp.Request.Header - return info, nil - } - return nil, driver115.ErrUnexpected + info := &driver115.DownloadInfo{} + info.PickCode = pickCode + info.Header = resp.Request.Header + info.Url.Url = downloadInfo.Url + return info, nil +} + +func (c *Pan115) GenerateToken(fileID, preID, timeStamp, fileSize, signKey, signVal string) string { + userID := strconv.FormatInt(c.client.UserID, 10) + userIDMd5 := md5.Sum([]byte(userID)) + tokenMd5 := md5.Sum([]byte(md5Salt + fileID + fileSize + signKey + signVal + userID + timeStamp + hex.EncodeToString(userIDMd5[:]) + appVer)) + return hex.EncodeToString(tokenMd5[:]) } func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) { @@ -161,7 +198,7 @@ func (d *Pan115) rapidUpload(fileSize 
int64, fileName, dirID, preID, fileID stri signKey, signVal := "", "" for retry := true; retry; { - t := driver115.Now() + t := driver115.NowMilli() if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil { return nil, err @@ -172,7 +209,7 @@ func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID stri } form.Set("t", t.String()) - form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal)) + form.Set("token", d.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal)) if signKey != "" && signVal != "" { form.Set("sign_key", signKey) form.Set("sign_val", signVal) @@ -225,6 +262,9 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri length := end - start + 1 reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length}) + if err != nil { + return "", err + } hashStr, err := utils.HashReader(utils.SHA1, reader) if err != nil { return "", err @@ -233,8 +273,43 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri return } +// UploadByOSS use aliyun sdk to upload +func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSParams, s model.FileStreamer, dirID string, up driver.UpdateProgress) (*UploadResult, error) { + ossToken, err := c.client.GetOSSToken() + if err != nil { + return nil, err + } + ossClient, err := oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret) + if err != nil { + return nil, err + } + bucket, err := ossClient.Bucket(params.Bucket) + if err != nil { + return nil, err + } + + var bodyBytes []byte + r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) + if err = bucket.PutObject(params.Object, r, append( + driver115.OssOption(params, ossToken), + oss.CallbackResult(&bodyBytes), + )...); err != nil { + return nil, err + } + + var uploadResult UploadResult + if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil { + return nil, err + } + return &uploadResult, uploadResult.Err(string(bodyBytes)) +} + // UploadByMultipart upload by mutipart blocks -func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error { +func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.UploadOSSParams, fileSize int64, s model.FileStreamer, + dirID string, up driver.UpdateProgress, opts ...driver115.UploadMultipartOption) (*UploadResult, error) { var ( chunks []oss.FileChunk parts []oss.UploadPart @@ -242,12 +317,13 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i ossClient *oss.Client bucket *oss.Bucket ossToken *driver115.UploadOSSTokenResp + bodyBytes []byte err error ) - tmpF, err := stream.CacheFullInTempFile() + tmpF, err := s.CacheFullInTempFile() if err != nil { - return err + return nil, err } options := driver115.DefalutUploadMultipartOptions() @@ -256,17 +332,19 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i f(options) } } + // oss 启用Sequential必须按顺序上传 + options.ThreadsNum = 1 if ossToken, err = d.client.GetOSSToken(); err != nil { - return err + return nil, err } - if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil { - return err + if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret, oss.EnableMD5(true), 
oss.EnableCRC(true)); err != nil { + return nil, err } if bucket, err = ossClient.Bucket(params.Bucket); err != nil { - return err + return nil, err } // ossToken一小时后就会失效,所以每50分钟重新获取一次 @@ -276,14 +354,15 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i timeout := time.NewTimer(options.Timeout) if chunks, err = SplitFile(fileSize); err != nil { - return err + return nil, err } if imur, err = bucket.InitiateMultipartUpload(params.Object, oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken), oss.UserAgentHeader(driver115.OSSUserAgent), + oss.EnableSha1(), oss.Sequential(), ); err != nil { - return err + return nil, err } wg := sync.WaitGroup{} @@ -301,6 +380,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i quit <- struct{}{} }() + completedNum := atomic.Int32{} // consumers for i := 0; i < options.ThreadsNum; i++ { go func(threadId int) { @@ -313,25 +393,28 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i var part oss.UploadPart // 出现错误就继续尝试,共尝试3次 for retry := 0; retry < 3; retry++ { select { + case <-ctx.Done(): + break case <-ticker.C: if ossToken, err = d.client.GetOSSToken(); err != nil { // 到时重新获取ossToken errCh <- errors.Wrap(err, "刷新token时出现错误") } default: } - buf := make([]byte, chunk.Size) if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) { continue } - - b := bytes.NewBuffer(buf) - if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil { + if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)), + chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil { break } } if err != nil { - errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err)) + errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", s.GetName(), chunk.Number, err)) + } else { + num := completedNum.Add(1) + up(float64(num) * 100.0 / float64(len(chunks))) } UploadedPartsCh <- part } @@ -350,25 +433,31 @@ LOOP: case <-ticker.C: // 到时重新获取ossToken if ossToken, err = d.client.GetOSSToken(); err != nil { - return err + return nil, err } case <-quit: break LOOP case <-errCh: - return err + return nil, err case <-timeout.C: - return fmt.Errorf("time out") + return nil, fmt.Errorf("time out") } } - // EOF错误是xml的Unmarshal导致的,响应其实是json格式,所以实际上上传是成功的 - if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) { - // 当文件名含有 &< 这两个字符之一时响应的xml解析会出现错误,实际上上传是成功的 - if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") { - return err - } + // 不知道啥原因,oss那边分片上传不计算sha1,导致115服务器校验错误 + // params.Callback.Callback = strings.ReplaceAll(params.Callback.Callback, "${sha1}", params.SHA1) + if _, err := bucket.CompleteMultipartUpload(imur, parts, append( + driver115.OssOption(params, ossToken), + oss.CallbackResult(&bodyBytes), + )...); err != nil { + return nil, err } - return d.checkUploadStatus(dirID, params.SHA1) + + var uploadResult UploadResult + if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil { + return nil, err + } + return &uploadResult, uploadResult.Err(string(bodyBytes)) } func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) { @@ -377,27 +466,6 @@ func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) { } } -func (d *Pan115) checkUploadStatus(dirID, sha1 
string) error { - // 验证上传是否成功 - req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8") - opts := []driver115.GetFileOptions{ - driver115.WithOrder(driver115.FileOrderByTime), - driver115.WithShowDirEnable(false), - driver115.WithAsc(false), - driver115.WithLimit(500), - } - fResp, err := driver115.GetFiles(req, dirID, opts...) - if err != nil { - return err - } - for _, fileInfo := range fResp.Files { - if fileInfo.Sha1 == sha1 { - return nil - } - } - return driver115.ErrUploadFailed -} - func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) { for i := int64(1); i < 10; i++ { if fileSize < i*utils.GB { // 文件大小小于iGB时分为i*1000片 diff --git a/drivers/115_open/driver.go b/drivers/115_open/driver.go new file mode 100644 index 00000000..6121d3b2 --- /dev/null +++ b/drivers/115_open/driver.go @@ -0,0 +1,335 @@ +package _115_open + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + "github.com/alist-org/alist/v3/cmd/flags" + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/utils" + sdk "github.com/xhofe/115-sdk-go" + "golang.org/x/time/rate" +) + +type Open115 struct { + model.Storage + Addition + client *sdk.Client + limiter *rate.Limiter +} + +func (d *Open115) Config() driver.Config { + return config +} + +func (d *Open115) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *Open115) Init(ctx context.Context) error { + d.client = sdk.New(sdk.WithRefreshToken(d.Addition.RefreshToken), + sdk.WithAccessToken(d.Addition.AccessToken), + sdk.WithOnRefreshToken(func(s1, s2 string) { + d.Addition.AccessToken = s1 + d.Addition.RefreshToken = s2 + op.MustSaveDriverStorage(d) + })) + if flags.Debug || flags.Dev { + d.client.SetDebug(true) + } + _, err := d.client.UserInfo(ctx) + if err != nil { + return err + } + if d.Addition.LimitRate > 0 { + d.limiter = rate.NewLimiter(rate.Limit(d.Addition.LimitRate), 1) + } + return nil +} + +func (d *Open115) WaitLimit(ctx context.Context) error { + if d.limiter != nil { + return d.limiter.Wait(ctx) + } + return nil +} + +func (d *Open115) Drop(ctx context.Context) error { + return nil +} + +func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + var res []model.Obj + pageSize := int64(200) + offset := int64(0) + for { + if err := d.WaitLimit(ctx); err != nil { + return nil, err + } + resp, err := d.client.GetFiles(ctx, &sdk.GetFilesReq{ + CID: dir.GetID(), + Limit: pageSize, + Offset: offset, + ASC: d.Addition.OrderDirection == "asc", + O: d.Addition.OrderBy, + // Cur: 1, + ShowDir: true, + }) + if err != nil { + return nil, err + } + res = append(res, utils.MustSliceConvert(resp.Data, func(src sdk.GetFilesResp_File) model.Obj { + obj := Obj(src) + return &obj + })...) 
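+		// resp.Count is the total number of entries on the server; stop paging once they have all been fetched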
+ if len(res) >= int(resp.Count) { + break + } + offset += pageSize + } + return res, nil +} + +func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + if err := d.WaitLimit(ctx); err != nil { + return nil, err + } + var ua string + if args.Header != nil { + ua = args.Header.Get("User-Agent") + } + if ua == "" { + ua = base.UserAgent + } + obj, ok := file.(*Obj) + if !ok { + return nil, fmt.Errorf("can't convert obj") + } + pc := obj.Pc + resp, err := d.client.DownURL(ctx, pc, ua) + if err != nil { + return nil, err + } + u, ok := resp[obj.GetID()] + if !ok { + return nil, fmt.Errorf("can't get link") + } + return &model.Link{ + URL: u.URL.URL, + Header: http.Header{ + "User-Agent": []string{ua}, + }, + }, nil +} + +func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + if err := d.WaitLimit(ctx); err != nil { + return nil, err + } + resp, err := d.client.Mkdir(ctx, parentDir.GetID(), dirName) + if err != nil { + return nil, err + } + return &Obj{ + Fid: resp.FileID, + Pid: parentDir.GetID(), + Fn: dirName, + Fc: "0", + Upt: time.Now().Unix(), + Uet: time.Now().Unix(), + UpPt: time.Now().Unix(), + }, nil +} + +func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + if err := d.WaitLimit(ctx); err != nil { + return nil, err + } + _, err := d.client.Move(ctx, &sdk.MoveReq{ + FileIDs: srcObj.GetID(), + ToCid: dstDir.GetID(), + }) + if err != nil { + return nil, err + } + return srcObj, nil +} + +func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + if err := d.WaitLimit(ctx); err != nil { + return nil, err + } + _, err := d.client.UpdateFile(ctx, &sdk.UpdateFileReq{ + FileID: srcObj.GetID(), + FileNma: newName, + }) + if err != nil { + return nil, err + } + obj, ok := srcObj.(*Obj) + if ok { + obj.Fn = newName + } + return srcObj, nil +} + +func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + if err := d.WaitLimit(ctx); err != nil { + return nil, err + } + _, err := d.client.Copy(ctx, &sdk.CopyReq{ + PID: dstDir.GetID(), + FileID: srcObj.GetID(), + NoDupli: "1", + }) + if err != nil { + return nil, err + } + return srcObj, nil +} + +func (d *Open115) Remove(ctx context.Context, obj model.Obj) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } + _obj, ok := obj.(*Obj) + if !ok { + return fmt.Errorf("can't convert obj") + } + _, err := d.client.DelFile(ctx, &sdk.DelFileReq{ + FileIDs: _obj.GetID(), + ParentID: _obj.Pid, + }) + if err != nil { + return err + } + return nil +} + +func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } + tempF, err := file.CacheFullInTempFile() + if err != nil { + return err + } + // cal full sha1 + sha1, err := utils.HashReader(utils.SHA1, tempF) + if err != nil { + return err + } + _, err = tempF.Seek(0, io.SeekStart) + if err != nil { + return err + } + // pre 128k sha1 + sha1128k, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, 128*1024)) + if err != nil { + return err + } + _, err = tempF.Seek(0, io.SeekStart) + if err != nil { + return err + } + // 1. 
Init + resp, err := d.client.UploadInit(ctx, &sdk.UploadInitReq{ + FileName: file.GetName(), + FileSize: file.GetSize(), + Target: dstDir.GetID(), + FileID: strings.ToUpper(sha1), + PreID: strings.ToUpper(sha1128k), + }) + if err != nil { + return err + } + if resp.Status == 2 { + return nil + } + // 2. two way verify + if utils.SliceContains([]int{6, 7, 8}, resp.Status) { + signCheck := strings.Split(resp.SignCheck, "-") //"sign_check": "2392148-2392298" 取2392148-2392298之间的内容(包含2392148、2392298)的sha1 + start, err := strconv.ParseInt(signCheck[0], 10, 64) + if err != nil { + return err + } + end, err := strconv.ParseInt(signCheck[1], 10, 64) + if err != nil { + return err + } + _, err = tempF.Seek(start, io.SeekStart) + if err != nil { + return err + } + signVal, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, end-start+1)) + if err != nil { + return err + } + _, err = tempF.Seek(0, io.SeekStart) + if err != nil { + return err + } + resp, err = d.client.UploadInit(ctx, &sdk.UploadInitReq{ + FileName: file.GetName(), + FileSize: file.GetSize(), + Target: dstDir.GetID(), + FileID: strings.ToUpper(sha1), + PreID: strings.ToUpper(sha1128k), + SignKey: resp.SignKey, + SignVal: strings.ToUpper(signVal), + }) + if err != nil { + return err + } + if resp.Status == 2 { + return nil + } + } + // 3. get upload token + tokenResp, err := d.client.UploadGetToken(ctx) + if err != nil { + return err + } + // 4. upload + err = d.multpartUpload(ctx, tempF, file, up, tokenResp, resp) + if err != nil { + return err + } + return nil +} + +// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { +// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional +// return nil, errs.NotImplement +// } + +// func (d *Open115) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { +// // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional +// return nil, errs.NotImplement +// } + +// func (d *Open115) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { +// // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional +// return nil, errs.NotImplement +// } + +// func (d *Open115) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) { +// // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional +// // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir +// // return errs.NotImplement to use an internal archive tool +// return nil, errs.NotImplement +// } + +//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { +// return nil, errs.NotSupport +//} + +var _ driver.Driver = (*Open115)(nil) diff --git a/drivers/115_open/meta.go b/drivers/115_open/meta.go new file mode 100644 index 00000000..66b956c0 --- /dev/null +++ b/drivers/115_open/meta.go @@ -0,0 +1,37 @@ +package _115_open + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + // Usually one of two + driver.RootID + // define other + RefreshToken string `json:"refresh_token" required:"true"` + OrderBy string `json:"order_by" type:"select" 
options:"file_name,file_size,user_utime,file_type"` + OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"` + LimitRate float64 `json:"limit_rate" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"` + AccessToken string +} + +var config = driver.Config{ + Name: "115 Open", + LocalSort: false, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: false, + NeedMs: false, + DefaultRoot: "0", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &Open115{} + }) +} diff --git a/drivers/115_open/types.go b/drivers/115_open/types.go new file mode 100644 index 00000000..491a368e --- /dev/null +++ b/drivers/115_open/types.go @@ -0,0 +1,59 @@ +package _115_open + +import ( + "time" + + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + sdk "github.com/xhofe/115-sdk-go" +) + +type Obj sdk.GetFilesResp_File + +// Thumb implements model.Thumb. +func (o *Obj) Thumb() string { + return o.Thumbnail +} + +// CreateTime implements model.Obj. +func (o *Obj) CreateTime() time.Time { + return time.Unix(o.UpPt, 0) +} + +// GetHash implements model.Obj. +func (o *Obj) GetHash() utils.HashInfo { + return utils.NewHashInfo(utils.SHA1, o.Sha1) +} + +// GetID implements model.Obj. +func (o *Obj) GetID() string { + return o.Fid +} + +// GetName implements model.Obj. +func (o *Obj) GetName() string { + return o.Fn +} + +// GetPath implements model.Obj. +func (o *Obj) GetPath() string { + return "" +} + +// GetSize implements model.Obj. +func (o *Obj) GetSize() int64 { + return o.FS +} + +// IsDir implements model.Obj. +func (o *Obj) IsDir() bool { + return o.Fc == "0" +} + +// ModTime implements model.Obj. 
+func (o *Obj) ModTime() time.Time { + return time.Unix(o.Upt, 0) +} + +var _ model.Obj = (*Obj)(nil) +var _ model.Thumb = (*Obj)(nil) diff --git a/drivers/115_open/upload.go b/drivers/115_open/upload.go new file mode 100644 index 00000000..282582ef --- /dev/null +++ b/drivers/115_open/upload.go @@ -0,0 +1,140 @@ +package _115_open + +import ( + "context" + "encoding/base64" + "io" + "time" + + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/avast/retry-go" + sdk "github.com/xhofe/115-sdk-go" +) + +func calPartSize(fileSize int64) int64 { + var partSize int64 = 20 * utils.MB + if fileSize > partSize { + if fileSize > 1*utils.TB { // file Size over 1TB + partSize = 5 * utils.GB // file part size 5GB + } else if fileSize > 768*utils.GB { // over 768GB + partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part + } else if fileSize > 512*utils.GB { // over 512GB + partSize = 82463373 // ≈ 78.6432MB + } else if fileSize > 384*utils.GB { // over 384GB + partSize = 54975582 // ≈ 52.4288MB + } else if fileSize > 256*utils.GB { // over 256GB + partSize = 41231687 // ≈ 39.3216MB + } else if fileSize > 128*utils.GB { // over 128GB + partSize = 27487791 // ≈ 26.2144MB + } + } + return partSize +} + +func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error { + ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken)) + if err != nil { + return err + } + bucket, err := ossClient.Bucket(initResp.Bucket) + if err != nil { + return err + } + + err = bucket.PutObject(initResp.Object, tempF, + oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))), + oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))), + ) + + return err +} + +// type CallbackResult struct { +// State bool `json:"state"` +// Code int `json:"code"` +// Message string `json:"message"` +// Data struct { +// PickCode string `json:"pick_code"` +// FileName string `json:"file_name"` +// FileSize int64 `json:"file_size"` +// FileID string `json:"file_id"` +// ThumbURL string `json:"thumb_url"` +// Sha1 string `json:"sha1"` +// Aid int `json:"aid"` +// Cid string `json:"cid"` +// } `json:"data"` +// } + +func (d *Open115) multpartUpload(ctx context.Context, tempF model.File, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error { + fileSize := stream.GetSize() + chunkSize := calPartSize(fileSize) + + ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken)) + if err != nil { + return err + } + bucket, err := ossClient.Bucket(initResp.Bucket) + if err != nil { + return err + } + + imur, err := bucket.InitiateMultipartUpload(initResp.Object, oss.Sequential()) + if err != nil { + return err + } + + partNum := (stream.GetSize() + chunkSize - 1) / chunkSize + parts := make([]oss.UploadPart, partNum) + offset := int64(0) + for i := int64(1); i <= partNum; i++ { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + + partSize := chunkSize + if i == partNum { + partSize = fileSize - (i-1)*chunkSize + } + rd := utils.NewMultiReadable(io.LimitReader(stream, partSize)) + err = retry.Do(func() error { + _ = 
rd.Reset() + rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd) + part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i)) + if err != nil { + return err + } + parts[i-1] = part + return nil + }, + retry.Attempts(3), + retry.DelayType(retry.BackOffDelay), + retry.Delay(time.Second)) + if err != nil { + return err + } + + if i == partNum { + offset = fileSize + } else { + offset += partSize + } + up(float64(offset) / float64(fileSize)) + } + + // callbackRespBytes := make([]byte, 1024) + _, err = bucket.CompleteMultipartUpload( + imur, + parts, + oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))), + oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))), + // oss.CallbackResult(&callbackRespBytes), + ) + if err != nil { + return err + } + + return nil +} diff --git a/drivers/115_open/util.go b/drivers/115_open/util.go new file mode 100644 index 00000000..ee021659 --- /dev/null +++ b/drivers/115_open/util.go @@ -0,0 +1,3 @@ +package _115_open + +// do others that not defined in Driver interface diff --git a/drivers/115_share/meta.go b/drivers/115_share/meta.go index 1d203b24..92f8bf0f 100644 --- a/drivers/115_share/meta.go +++ b/drivers/115_share/meta.go @@ -9,8 +9,8 @@ type Addition struct { Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"` QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"` QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"` - PageSize int64 `json:"page_size" type:"number" default:"20" help:"list api per page size of 115 driver"` - LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"` + PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"` + LimitRate float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"` ShareCode string `json:"share_code" type:"text" required:"true" help:"share code of 115 share link"` ReceiveCode string `json:"receive_code" type:"text" required:"true" help:"receive code of 115 share link"` driver.RootID @@ -18,7 +18,7 @@ type Addition struct { var config = driver.Config{ Name: "115 Share", - DefaultRoot: "", + DefaultRoot: "0", // OnlyProxy: true, // OnlyLocal: true, CheckStatus: false, diff --git a/drivers/115_share/utils.go b/drivers/115_share/utils.go index 812352ef..1f9e112d 100644 --- a/drivers/115_share/utils.go +++ b/drivers/115_share/utils.go @@ -96,7 +96,7 @@ func (d *Pan115Share) login() error { if cr, err = d.client.QRCodeLoginWithApp(s, driver115.LoginApp(d.QRCodeSource)); err != nil { return errors.Wrap(err, "failed to login by qrcode") } - d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s", cr.UID, cr.CID, cr.SEID) + d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s;KID=%s", cr.UID, cr.CID, cr.SEID, cr.KID) d.QRCodeToken = "" } else if d.Cookie != "" { if err = cr.FromCookie(d.Cookie); err != nil { diff --git a/drivers/123/driver.go b/drivers/123/driver.go index aeda7fcf..32c053e2 100644 --- a/drivers/123/driver.go +++ b/drivers/123/driver.go @@ -2,21 +2,20 @@ package _123 import ( "context" - "crypto/md5" "encoding/base64" - "encoding/hex" "fmt" - "golang.org/x/time/rate" - "io" "net/http" "net/url" "sync" "time" + "golang.org/x/time/rate" + 
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" @@ -41,12 +40,12 @@ func (d *Pan123) GetAddition() driver.Additional { } func (d *Pan123) Init(ctx context.Context) error { - _, err := d.request(UserInfo, http.MethodGet, nil, nil) + _, err := d.Request(UserInfo, http.MethodGet, nil, nil) return err } func (d *Pan123) Drop(ctx context.Context) error { - _, _ = d.request(Logout, http.MethodPost, func(req *resty.Request) { + _, _ = d.Request(Logout, http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{}) }, nil) return nil @@ -81,7 +80,8 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) "size": f.Size, "type": f.Type, } - resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) { + resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) { + req.SetBody(data).SetHeaders(headers) }, nil) if err != nil { @@ -134,7 +134,7 @@ func (d *Pan123) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin "size": 0, "type": 1, } - _, err := d.request(Mkdir, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) { req.SetBody(data) }, nil) return err @@ -145,7 +145,7 @@ func (d *Pan123) Move(ctx context.Context, srcObj, dstDir model.Obj) error { "fileIdList": []base.Json{{"FileId": srcObj.GetID()}}, "parentFileId": dstDir.GetID(), } - _, err := d.request(Move, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(Move, http.MethodPost, func(req *resty.Request) { req.SetBody(data) }, nil) return err @@ -157,7 +157,7 @@ func (d *Pan123) Rename(ctx context.Context, srcObj model.Obj, newName string) e "fileId": srcObj.GetID(), "fileName": newName, } - _, err := d.request(Rename, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(Rename, http.MethodPost, func(req *resty.Request) { req.SetBody(data) }, nil) return err @@ -174,7 +174,7 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error { "operation": true, "fileTrashInfoList": []File{f}, } - _, err := d.request(Trash, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(Trash, http.MethodPost, func(req *resty.Request) { req.SetBody(data) }, nil) return err @@ -183,36 +183,26 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error { } } -func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - // const DEFAULT int64 = 10485760 - h := md5.New() - // need to calculate md5 of the full content - tempFile, err := stream.CacheFullInTempFile() - if err != nil { - return err +func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + etag := file.GetHash().GetHash(utils.MD5) + var err error + if len(etag) < utils.MD5.Width { + _, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5) + if err != nil { + return err + } } - defer func() { - _ = tempFile.Close() - }() - if _, err = utils.CopyWithBuffer(h, tempFile); err != nil { - return err - } - _, err = tempFile.Seek(0, io.SeekStart) - if err != nil { - return err - } - etag := hex.EncodeToString(h.Sum(nil)) data := base.Json{ "driveId": 0, "duplicate": 2, // 
2->覆盖 1->重命名 0->默认 "etag": etag, - "fileName": stream.GetName(), + "fileName": file.GetName(), "parentFileId": dstDir.GetID(), - "size": stream.GetSize(), + "size": file.GetSize(), "type": 0, } var resp UploadResp - res, err := d.request(UploadRequest, http.MethodPost, func(req *resty.Request) { + res, err := d.Request(UploadRequest, http.MethodPost, func(req *resty.Request) { req.SetBody(data).SetContext(ctx) }, &resp) if err != nil { @@ -223,7 +213,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr return nil } if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" { - err = d.newUpload(ctx, &resp, stream, tempFile, up) + err = d.newUpload(ctx, &resp, file, up) return err } else { cfg := &aws.Config{ @@ -237,17 +227,23 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr return err } uploader := s3manager.NewUploader(s) - if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { - uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1) + if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { + uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1) } input := &s3manager.UploadInput{ Bucket: &resp.Data.Bucket, Key: &resp.Data.Key, - Body: tempFile, + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: file, + UpdateProgress: up, + }), } _, err = uploader.UploadWithContext(ctx, input) + if err != nil { + return err + } } - _, err = d.request(UploadComplete, http.MethodPost, func(req *resty.Request) { + _, err = d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "fileId": resp.Data.FileId, }).SetContext(ctx) diff --git a/drivers/123/upload.go b/drivers/123/upload.go index 6f6221f1..b0482a9f 100644 --- a/drivers/123/upload.go +++ b/drivers/123/upload.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "math" "net/http" "strconv" @@ -25,7 +24,7 @@ func (d *Pan123) getS3PreSignedUrls(ctx context.Context, upReq *UploadResp, star "StorageNode": upReq.Data.StorageNode, } var s3PreSignedUrls S3PreSignedURLs - _, err := d.request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) { req.SetBody(data).SetContext(ctx) }, &s3PreSignedUrls) if err != nil { @@ -44,7 +43,7 @@ func (d *Pan123) getS3Auth(ctx context.Context, upReq *UploadResp, start, end in "uploadId": upReq.Data.UploadId, } var s3PreSignedUrls S3PreSignedURLs - _, err := d.request(S3Auth, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(S3Auth, http.MethodPost, func(req *resty.Request) { req.SetBody(data).SetContext(ctx) }, &s3PreSignedUrls) if err != nil { @@ -63,21 +62,31 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F "key": upReq.Data.Key, "uploadId": upReq.Data.UploadId, } - _, err := d.request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) { + _, err := d.Request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) { req.SetBody(data).SetContext(ctx) }, nil) return err } -func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error { - chunkSize := int64(1024 * 1024 * 16) +func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error { + tmpF, err := file.CacheFullInTempFile() + if 
err != nil { + return err + } // fetch s3 pre signed urls - chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize))) + size := file.GetSize() + chunkSize := min(size, 16*utils.MB) + chunkCount := int(size / chunkSize) + lastChunkSize := size % chunkSize + if lastChunkSize > 0 { + chunkCount++ + } else { + lastChunkSize = chunkSize + } // only 1 batch is allowed - isMultipart := chunkCount > 1 batchSize := 1 getS3UploadUrl := d.getS3Auth - if isMultipart { + if chunkCount > 1 { batchSize = 10 getS3UploadUrl = d.getS3PreSignedUrls } @@ -86,10 +95,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi return ctx.Err() } start := i - end := i + batchSize - if end > chunkCount+1 { - end = chunkCount + 1 - } + end := min(i+batchSize, chunkCount+1) s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end) if err != nil { return err @@ -101,9 +107,9 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi } curSize := chunkSize if j == chunkCount { - curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize + curSize = lastChunkSize } - err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl) + err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl) if err != nil { return err } @@ -114,12 +120,12 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi return d.completeS3(ctx, upReq, file, chunkCount > 1) } -func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error { +func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error { uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)] if uploadUrl == "" { return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls) } - req, err := http.NewRequest("PUT", uploadUrl, reader) + req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader)) if err != nil { return err } @@ -142,6 +148,7 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign } s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls // retry + reader.Seek(0, io.SeekStart) return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl) } if res.StatusCode != http.StatusOK { diff --git a/drivers/123/util.go b/drivers/123/util.go index 73c73b3b..7e5a2397 100644 --- a/drivers/123/util.go +++ b/drivers/123/util.go @@ -26,8 +26,9 @@ const ( Api = "https://www.123pan.com/api" AApi = "https://www.123pan.com/a/api" BApi = "https://www.123pan.com/b/api" + LoginApi = "https://login.123pan.com/api" MainApi = BApi - SignIn = MainApi + "/user/sign_in" + SignIn = LoginApi + "/user/sign_in" Logout = MainApi + "/user/logout" UserInfo = MainApi + "/user/info" FileList = MainApi + "/file/list/new" @@ -193,7 +194,9 @@ func (d *Pan123) login() error { // return &authKey, nil //} -func (d *Pan123) request(url string, method string, callback base.ReqCallback, resp 
interface{}) ([]byte, error) { +func (d *Pan123) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { + isRetry := false +do: req := base.RestyClient.R() req.SetHeaders(map[string]string{ "origin": "https://www.123pan.com", @@ -222,12 +225,13 @@ func (d *Pan123) request(url string, method string, callback base.ReqCallback, r body := res.Body() code := utils.Json.Get(body, "code").ToInt() if code != 0 { - if code == 401 { + if !isRetry && code == 401 { err := d.login() if err != nil { return nil, err } - return d.request(url, method, callback, resp) + isRetry = true + goto do } return nil, errors.New(jsoniter.Get(body, "message").ToString()) } @@ -259,7 +263,7 @@ func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([] "operateType": "4", "inDirectSpace": "false", } - _res, err := d.request(FileList, http.MethodGet, func(req *resty.Request) { + _res, err := d.Request(FileList, http.MethodGet, func(req *resty.Request) { req.SetQueryParams(query) }, &resp) if err != nil { diff --git a/drivers/123_share/driver.go b/drivers/123_share/driver.go index 9c1f3803..640fb749 100644 --- a/drivers/123_share/driver.go +++ b/drivers/123_share/driver.go @@ -4,12 +4,14 @@ import ( "context" "encoding/base64" "fmt" - "golang.org/x/time/rate" "net/http" "net/url" "sync" "time" + "golang.org/x/time/rate" + + _123 "github.com/alist-org/alist/v3/drivers/123" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" @@ -23,6 +25,7 @@ type Pan123Share struct { model.Storage Addition apiRateLimit sync.Map + ref *_123.Pan123 } func (d *Pan123Share) Config() driver.Config { @@ -39,7 +42,17 @@ func (d *Pan123Share) Init(ctx context.Context) error { return nil } +func (d *Pan123Share) InitReference(storage driver.Driver) error { + refStorage, ok := storage.(*_123.Pan123) + if ok { + d.ref = refStorage + return nil + } + return fmt.Errorf("ref: storage is not 123Pan") +} + func (d *Pan123Share) Drop(ctx context.Context) error { + d.ref = nil return nil } diff --git a/drivers/123_share/util.go b/drivers/123_share/util.go index 80ea8f0c..c2140bf6 100644 --- a/drivers/123_share/util.go +++ b/drivers/123_share/util.go @@ -53,6 +53,9 @@ func GetApi(rawUrl string) string { } func (d *Pan123Share) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { + if d.ref != nil { + return d.ref.Request(url, method, callback, resp) + } req := base.RestyClient.R() req.SetHeaders(map[string]string{ "origin": "https://www.123pan.com", diff --git a/drivers/139/driver.go b/drivers/139/driver.go index d33c3d77..a57609bc 100644 --- a/drivers/139/driver.go +++ b/drivers/139/driver.go @@ -2,28 +2,32 @@ package _139 import ( "context" - "encoding/base64" + "encoding/xml" "fmt" "io" "net/http" + "path" "strconv" - "strings" "time" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/pkg/utils" + streamPkg "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/cron" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/pkg/utils/random" log "github.com/sirupsen/logrus" ) type Yun139 struct { model.Storage Addition - cron *cron.Cron - Account string + cron *cron.Cron + Account string + ref *Yun139 + PersonalCloudHost string } func (d *Yun139) 
Config() driver.Config { @@ -35,56 +39,79 @@ func (d *Yun139) GetAddition() driver.Additional { } func (d *Yun139) Init(ctx context.Context) error { - if d.Authorization == "" { - return fmt.Errorf("authorization is empty") - } - d.cron = cron.NewCron(time.Hour * 24 * 7) - d.cron.Do(func() { + if d.ref == nil { + if len(d.Authorization) == 0 { + return fmt.Errorf("authorization is empty") + } err := d.refreshToken() if err != nil { - log.Errorf("%+v", err) + return err } - }) + + // Query Route Policy + var resp QueryRoutePolicyResp + _, err = d.requestRoute(base.Json{ + "userInfo": base.Json{ + "userType": 1, + "accountType": 1, + "accountName": d.Account}, + "modAddrType": 1, + }, &resp) + if err != nil { + return err + } + for _, policyItem := range resp.Data.RoutePolicyList { + if policyItem.ModName == "personal" { + d.PersonalCloudHost = policyItem.HttpsUrl + break + } + } + if len(d.PersonalCloudHost) == 0 { + return fmt.Errorf("PersonalCloudHost is empty") + } + + d.cron = cron.NewCron(time.Hour * 12) + d.cron.Do(func() { + err := d.refreshToken() + if err != nil { + log.Errorf("%+v", err) + } + }) + } switch d.Addition.Type { case MetaPersonalNew: if len(d.Addition.RootFolderID) == 0 { d.RootFolderID = "/" } - return nil case MetaPersonal: if len(d.Addition.RootFolderID) == 0 { d.RootFolderID = "root" } - fallthrough + case MetaGroup: + if len(d.Addition.RootFolderID) == 0 { + d.RootFolderID = d.CloudID + } case MetaFamily: - decode, err := base64.StdEncoding.DecodeString(d.Authorization) - if err != nil { - return err - } - decodeStr := string(decode) - splits := strings.Split(decodeStr, ":") - if len(splits) < 2 { - return fmt.Errorf("authorization is invalid, splits < 2") - } - d.Account = splits[1] - _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{ - "qryUserExternInfoReq": base.Json{ - "commonAccountInfo": base.Json{ - "account": d.Account, - "accountType": 1, - }, - }, - }, nil) - return err default: return errs.NotImplement } + return nil +} + +func (d *Yun139) InitReference(storage driver.Driver) error { + refStorage, ok := storage.(*Yun139) + if ok { + d.ref = refStorage + return nil + } + return errs.NotSupport } func (d *Yun139) Drop(ctx context.Context) error { if d.cron != nil { d.cron.Stop() } + d.ref = nil return nil } @@ -96,6 +123,8 @@ func (d *Yun139) List(ctx context.Context, dir model.Obj, args model.ListArgs) ( return d.getFiles(dir.GetID()) case MetaFamily: return d.familyGetFiles(dir.GetID()) + case MetaGroup: + return d.groupGetFiles(dir.GetID()) default: return nil, errs.NotImplement } @@ -108,9 +137,11 @@ func (d *Yun139) Link(ctx context.Context, file model.Obj, args model.LinkArgs) case MetaPersonalNew: url, err = d.personalGetLink(file.GetID()) case MetaPersonal: - fallthrough - case MetaFamily: url, err = d.getLink(file.GetID()) + case MetaFamily: + url, err = d.familyGetLink(file.GetID(), file.GetPath()) + case MetaGroup: + url, err = d.groupGetLink(file.GetID(), file.GetPath()) default: return nil, errs.NotImplement } @@ -131,7 +162,7 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin "type": "folder", "fileRenameMode": "force_rename", } - pathname := "/hcy/file/create" + pathname := "/file/create" _, err = d.personalPost(pathname, data, nil) case MetaPersonal: data := base.Json{ @@ -139,7 +170,7 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin "parentCatalogID": parentDir.GetID(), "newCatalogName": dirName, "commonAccountInfo": base.Json{ - 
"account": d.Account, + "account": d.getAccount(), "accountType": 1, }, }, @@ -150,12 +181,26 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin data := base.Json{ "cloudID": d.CloudID, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, "docLibName": dirName, + "path": path.Join(parentDir.GetPath(), parentDir.GetID()), } - pathname := "/orchestration/familyCloud/cloudCatalog/v1.0/createCloudDoc" + pathname := "/orchestration/familyCloud-rebuild/cloudCatalog/v1.0/createCloudDoc" + _, err = d.post(pathname, data, nil) + case MetaGroup: + data := base.Json{ + "catalogName": dirName, + "commonAccountInfo": base.Json{ + "account": d.getAccount(), + "accountType": 1, + }, + "groupID": d.CloudID, + "parentFileId": parentDir.GetID(), + "path": path.Join(parentDir.GetPath(), parentDir.GetID()), + } + pathname := "/orchestration/group-rebuild/catalog/v1.0/createGroupCatalog" _, err = d.post(pathname, data, nil) default: err = errs.NotImplement @@ -170,12 +215,40 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, "fileIds": []string{srcObj.GetID()}, "toParentFileId": dstDir.GetID(), } - pathname := "/hcy/file/batchMove" + pathname := "/file/batchMove" _, err := d.personalPost(pathname, data, nil) if err != nil { return nil, err } return srcObj, nil + case MetaGroup: + var contentList []string + var catalogList []string + if srcObj.IsDir() { + catalogList = append(catalogList, srcObj.GetID()) + } else { + contentList = append(contentList, srcObj.GetID()) + } + data := base.Json{ + "taskType": 3, + "srcType": 2, + "srcGroupID": d.CloudID, + "destType": 2, + "destGroupID": d.CloudID, + "destPath": dstDir.GetPath(), + "contentList": contentList, + "catalogList": catalogList, + "commonAccountInfo": base.Json{ + "account": d.getAccount(), + "accountType": 1, + }, + } + pathname := "/orchestration/group-rebuild/task/v1.0/createBatchOprTask" + _, err := d.post(pathname, data, nil) + if err != nil { + return nil, err + } + return srcObj, nil case MetaPersonal: var contentInfoList []string var catalogInfoList []string @@ -194,7 +267,7 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, "newCatalogID": dstDir.GetID(), }, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, }, @@ -219,7 +292,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e "name": newName, "description": "", } - pathname := "/hcy/file/update" + pathname := "/file/update" _, err = d.personalPost(pathname, data, nil) case MetaPersonal: var data base.Json @@ -229,7 +302,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e "catalogID": srcObj.GetID(), "catalogName": newName, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -239,13 +312,72 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e "contentID": srcObj.GetID(), "contentName": newName, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } pathname = "/orchestration/personalCloud/content/v1.0/updateContentInfo" } _, err = d.post(pathname, data, nil) + case MetaGroup: + var data base.Json + var pathname string + if srcObj.IsDir() { + data = base.Json{ + "groupID": d.CloudID, + "modifyCatalogID": srcObj.GetID(), + "modifyCatalogName": newName, + "path": srcObj.GetPath(), + 
"commonAccountInfo": base.Json{ + "account": d.getAccount(), + "accountType": 1, + }, + } + pathname = "/orchestration/group-rebuild/catalog/v1.0/modifyGroupCatalog" + } else { + data = base.Json{ + "groupID": d.CloudID, + "contentID": srcObj.GetID(), + "contentName": newName, + "path": srcObj.GetPath(), + "commonAccountInfo": base.Json{ + "account": d.getAccount(), + "accountType": 1, + }, + } + pathname = "/orchestration/group-rebuild/content/v1.0/modifyGroupContent" + } + _, err = d.post(pathname, data, nil) + case MetaFamily: + var data base.Json + var pathname string + if srcObj.IsDir() { + // 网页接口不支持重命名家庭云文件夹 + // data = base.Json{ + // "catalogType": 3, + // "catalogID": srcObj.GetID(), + // "catalogName": newName, + // "commonAccountInfo": base.Json{ + // "account": d.getAccount(), + // "accountType": 1, + // }, + // "path": srcObj.GetPath(), + // } + // pathname = "/orchestration/familyCloud-rebuild/photoContent/v1.0/modifyCatalogInfo" + return errs.NotImplement + } else { + data = base.Json{ + "contentID": srcObj.GetID(), + "contentName": newName, + "commonAccountInfo": base.Json{ + "account": d.getAccount(), + "accountType": 1, + }, + "path": srcObj.GetPath(), + } + pathname = "/orchestration/familyCloud-rebuild/photoContent/v1.0/modifyContentInfo" + } + _, err = d.post(pathname, data, nil) default: err = errs.NotImplement } @@ -260,7 +392,7 @@ func (d *Yun139) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { "fileIds": []string{srcObj.GetID()}, "toParentFileId": dstDir.GetID(), } - pathname := "/hcy/file/batchCopy" + pathname := "/file/batchCopy" _, err := d.personalPost(pathname, data, nil) return err case MetaPersonal: @@ -281,7 +413,7 @@ func (d *Yun139) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { "newCatalogID": dstDir.GetID(), }, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, }, @@ -300,9 +432,31 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error { data := base.Json{ "fileIds": []string{obj.GetID()}, } - pathname := "/hcy/recyclebin/batchTrash" + pathname := "/recyclebin/batchTrash" _, err := d.personalPost(pathname, data, nil) return err + case MetaGroup: + var contentList []string + var catalogList []string + // 必须使用完整路径删除 + if obj.IsDir() { + catalogList = append(catalogList, obj.GetPath()) + } else { + contentList = append(contentList, path.Join(obj.GetPath(), obj.GetID())) + } + data := base.Json{ + "taskType": 2, + "srcGroupID": d.CloudID, + "contentList": contentList, + "catalogList": catalogList, + "commonAccountInfo": base.Json{ + "account": d.getAccount(), + "accountType": 1, + }, + } + pathname := "/orchestration/group-rebuild/task/v1.0/createBatchOprTask" + _, err := d.post(pathname, data, nil) + return err case MetaPersonal: fallthrough case MetaFamily: @@ -323,7 +477,7 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error { "catalogInfoList": catalogInfoList, }, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, }, @@ -334,13 +488,15 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error { "catalogList": catalogInfoList, "contentList": contentInfoList, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, + "sourceCloudID": d.CloudID, "sourceCatalogType": 1002, "taskType": 2, + "path": obj.GetPath(), } - pathname = "/orchestration/familyCloud/batchOprTask/v1.0/createBatchOprTask" + pathname = 
"/orchestration/familyCloud-rebuild/batchOprTask/v1.0/createBatchOprTask" } _, err := d.post(pathname, data, nil) return err @@ -349,20 +505,15 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error { } } -const ( - _ = iota //ignore first value by assigning to blank identifier - KB = 1 << (10 * iota) - MB - GB - TB -) - -func getPartSize(size int64) int64 { - // 网盘对于分片数量存在上限 - if size/GB > 30 { - return 512 * MB +func (d *Yun139) getPartSize(size int64) int64 { + if d.CustomUploadPartSize != 0 { + return d.CustomUploadPartSize } - return 100 * MB + // 网盘对于分片数量存在上限 + if size/utils.GB > 30 { + return 512 * utils.MB + } + return 100 * utils.MB } func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { @@ -370,149 +521,288 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr case MetaPersonalNew: var err error fullHash := stream.GetHash().GetHash(utils.SHA256) - if len(fullHash) <= 0 { - tmpF, err := stream.CacheFullInTempFile() - if err != nil { - return err - } - fullHash, err = utils.HashFile(utils.SHA256, tmpF) + if len(fullHash) != utils.SHA256.Width { + _, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA256) if err != nil { return err } } - // return errs.NotImplement + + size := stream.GetSize() + var partSize = d.getPartSize(size) + part := size / partSize + if size%partSize > 0 { + part++ + } else if part == 0 { + part = 1 + } + partInfos := make([]PartInfo, 0, part) + for i := int64(0); i < part; i++ { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + start := i * partSize + byteSize := size - start + if byteSize > partSize { + byteSize = partSize + } + partNumber := i + 1 + partInfo := PartInfo{ + PartNumber: partNumber, + PartSize: byteSize, + ParallelHashCtx: ParallelHashCtx{ + PartOffset: start, + }, + } + partInfos = append(partInfos, partInfo) + } + + // 筛选出前 100 个 partInfos + firstPartInfos := partInfos + if len(firstPartInfos) > 100 { + firstPartInfos = firstPartInfos[:100] + } + + // 创建任务,获取上传信息和前100个分片的上传地址 data := base.Json{ "contentHash": fullHash, "contentHashAlgorithm": "SHA256", "contentType": "application/octet-stream", "parallelUpload": false, - "partInfos": []base.Json{{ - "parallelHashCtx": base.Json{ - "partOffset": 0, - }, - "partNumber": 1, - "partSize": stream.GetSize(), - }}, - "size": stream.GetSize(), - "parentFileId": dstDir.GetID(), - "name": stream.GetName(), - "type": "file", - "fileRenameMode": "auto_rename", + "partInfos": firstPartInfos, + "size": size, + "parentFileId": dstDir.GetID(), + "name": stream.GetName(), + "type": "file", + "fileRenameMode": "auto_rename", } - pathname := "/hcy/file/create" + pathname := "/file/create" var resp PersonalUploadResp _, err = d.personalPost(pathname, data, &resp) if err != nil { return err } - if resp.Data.Exist || resp.Data.RapidUpload { + // 判断文件是否已存在 + // resp.Data.Exist: true 已存在同名文件且校验相同,云端不会重复增加文件,无需手动处理冲突 + if resp.Data.Exist { return nil } - // Progress - p := driver.NewProgress(stream.GetSize(), up) + // 判断文件是否支持快传 + // resp.Data.RapidUpload: true 支持快传,但此处直接检测是否返回分片的上传地址 + // 快传的情况下同样需要手动处理冲突 + if resp.Data.PartInfos != nil { + // 读取前100个分片的上传地址 + uploadPartInfos := resp.Data.PartInfos - // Update Progress - r := io.TeeReader(stream, p) + // 获取后续分片的上传地址 + for i := 101; i < len(partInfos); i += 100 { + end := i + 100 + if end > len(partInfos) { + end = len(partInfos) + } + batchPartInfos := partInfos[i:end] - req, err := http.NewRequest("PUT", 
resp.Data.PartInfos[0].UploadUrl, r) - if err != nil { - return err - } - req = req.WithContext(ctx) - req.Header.Set("Content-Type", "application/octet-stream") - req.Header.Set("Content-Length", fmt.Sprint(stream.GetSize())) - req.Header.Set("Origin", "https://yun.139.com") - req.Header.Set("Referer", "https://yun.139.com/") - req.ContentLength = stream.GetSize() + moredata := base.Json{ + "fileId": resp.Data.FileId, + "uploadId": resp.Data.UploadId, + "partInfos": batchPartInfos, + "commonAccountInfo": base.Json{ + "account": d.getAccount(), + "accountType": 1, + }, + } + pathname := "/file/getUploadUrl" + var moreresp PersonalUploadUrlResp + _, err = d.personalPost(pathname, moredata, &moreresp) + if err != nil { + return err + } + uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...) + } - res, err := base.HttpClient.Do(req) - if err != nil { - return err + // Progress + p := driver.NewProgress(size, up) + + rateLimited := driver.NewLimitedUploadStream(ctx, stream) + // 上传所有分片 + for _, uploadPartInfo := range uploadPartInfos { + index := uploadPartInfo.PartNumber - 1 + partSize := partInfos[index].PartSize + log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos)) + limitReader := io.LimitReader(rateLimited, partSize) + + // Update Progress + r := io.TeeReader(limitReader, p) + + req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r) + if err != nil { + return err + } + req = req.WithContext(ctx) + req.Header.Set("Content-Type", "application/octet-stream") + req.Header.Set("Content-Length", fmt.Sprint(partSize)) + req.Header.Set("Origin", "https://yun.139.com") + req.Header.Set("Referer", "https://yun.139.com/") + req.ContentLength = partSize + + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + _ = res.Body.Close() + log.Debugf("[139] uploaded: %+v", res) + if res.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected status code: %d", res.StatusCode) + } + } + + data = base.Json{ + "contentHash": fullHash, + "contentHashAlgorithm": "SHA256", + "fileId": resp.Data.FileId, + "uploadId": resp.Data.UploadId, + } + _, err = d.personalPost("/file/complete", data, nil) + if err != nil { + return err + } } - _ = res.Body.Close() - log.Debugf("%+v", res) - if res.StatusCode != http.StatusOK { - return fmt.Errorf("unexpected status code: %d", res.StatusCode) - } - - data = base.Json{ - "contentHash": fullHash, - "contentHashAlgorithm": "SHA256", - "fileId": resp.Data.FileId, - "uploadId": resp.Data.UploadId, - } - _, err = d.personalPost("/hcy/file/complete", data, nil) - if err != nil { - return err + // 处理冲突 + if resp.Data.FileName != stream.GetName() { + log.Debugf("[139] conflict detected: %s != %s", resp.Data.FileName, stream.GetName()) + // 给服务器一定时间处理数据,避免无法刷新文件列表 + time.Sleep(time.Millisecond * 500) + // 刷新并获取文件列表 + files, err := d.List(ctx, dstDir, model.ListArgs{Refresh: true}) + if err != nil { + return err + } + // 删除旧文件 + for _, file := range files { + if file.GetName() == stream.GetName() { + log.Debugf("[139] conflict: removing old: %s", file.GetName()) + // 删除前重命名旧文件,避免仍旧冲突 + err = d.Rename(ctx, file, stream.GetName()+random.String(4)) + if err != nil { + return err + } + err = d.Remove(ctx, file) + if err != nil { + return err + } + break + } + } + // 重命名新文件 + for _, file := range files { + if file.GetName() == resp.Data.FileName { + log.Debugf("[139] conflict: renaming new: %s => %s", file.GetName(), stream.GetName()) + err = d.Rename(ctx, file, stream.GetName()) + if err != nil { + return err + } + break + 
} + } } return nil case MetaPersonal: fallthrough case MetaFamily: + // 处理冲突 + // 获取文件列表 + files, err := d.List(ctx, dstDir, model.ListArgs{}) + if err != nil { + return err + } + // 删除旧文件 + for _, file := range files { + if file.GetName() == stream.GetName() { + log.Debugf("[139] conflict: removing old: %s", file.GetName()) + // 删除前重命名旧文件,避免仍旧冲突 + err = d.Rename(ctx, file, stream.GetName()+random.String(4)) + if err != nil { + return err + } + err = d.Remove(ctx, file) + if err != nil { + return err + } + break + } + } + var reportSize int64 + if d.ReportRealSize { + reportSize = stream.GetSize() + } else { + reportSize = 0 + } data := base.Json{ "manualRename": 2, "operation": 0, "fileCount": 1, - "totalSize": 0, // 去除上传大小限制 + "totalSize": reportSize, "uploadContentList": []base.Json{{ "contentName": stream.GetName(), - "contentSize": 0, // 去除上传大小限制 + "contentSize": reportSize, // "digest": "5a3231986ce7a6b46e408612d385bafa" }}, "parentCatalogID": dstDir.GetID(), "newCatalogName": "", "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } pathname := "/orchestration/personalCloud/uploadAndDownload/v1.0/pcUploadFileRequest" if d.isFamily() { - // data = d.newJson(base.Json{ - // "fileCount": 1, - // "manualRename": 2, - // "operation": 0, - // "path": "", - // "seqNo": "", - // "totalSize": 0, - // "uploadContentList": []base.Json{{ - // "contentName": stream.GetName(), - // "contentSize": 0, - // // "digest": "5a3231986ce7a6b46e408612d385bafa" - // }}, - // }) - // pathname = "/orchestration/familyCloud/content/v1.0/getFileUploadURL" - return errs.NotImplement + data = d.newJson(base.Json{ + "fileCount": 1, + "manualRename": 2, + "operation": 0, + "path": path.Join(dstDir.GetPath(), dstDir.GetID()), + "seqNo": random.String(32), //序列号不能为空 + "totalSize": reportSize, + "uploadContentList": []base.Json{{ + "contentName": stream.GetName(), + "contentSize": reportSize, + // "digest": "5a3231986ce7a6b46e408612d385bafa" + }}, + }) + pathname = "/orchestration/familyCloud-rebuild/content/v1.0/getFileUploadURL" } var resp UploadResp - _, err := d.post(pathname, data, &resp) + _, err = d.post(pathname, data, &resp) if err != nil { return err } + if resp.Data.Result.ResultCode != "0" { + return fmt.Errorf("get file upload url failed with result code: %s, message: %s", resp.Data.Result.ResultCode, resp.Data.Result.ResultDesc) + } + size := stream.GetSize() // Progress - p := driver.NewProgress(stream.GetSize(), up) - - var partSize = getPartSize(stream.GetSize()) - part := (stream.GetSize() + partSize - 1) / partSize - if part == 0 { + p := driver.NewProgress(size, up) + var partSize = d.getPartSize(size) + part := size / partSize + if size%partSize > 0 { + part++ + } else if part == 0 { part = 1 } + rateLimited := driver.NewLimitedUploadStream(ctx, stream) for i := int64(0); i < part; i++ { if utils.IsCanceled(ctx) { return ctx.Err() } start := i * partSize - byteSize := stream.GetSize() - start - if byteSize > partSize { - byteSize = partSize - } + byteSize := min(size-start, partSize) - limitReader := io.LimitReader(stream, byteSize) + limitReader := io.LimitReader(rateLimited, byteSize) // Update Progress r := io.TeeReader(limitReader, p) req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r) @@ -522,7 +812,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr req = req.WithContext(ctx) req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName())) - 
req.Header.Set("contentSize", strconv.FormatInt(stream.GetSize(), 10)) + req.Header.Set("contentSize", strconv.FormatInt(size, 10)) req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1)) req.Header.Set("uploadtaskID", resp.Data.UploadResult.UploadTaskID) req.Header.Set("rangeType", "0") @@ -532,13 +822,23 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr if err != nil { return err } - _ = res.Body.Close() - log.Debugf("%+v", res) if res.StatusCode != http.StatusOK { + res.Body.Close() return fmt.Errorf("unexpected status code: %d", res.StatusCode) } + bodyBytes, err := io.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("error reading response body: %v", err) + } + var result InterLayerUploadResult + err = xml.Unmarshal(bodyBytes, &result) + if err != nil { + return fmt.Errorf("error parsing XML: %v", err) + } + if result.ResultCode != 0 { + return fmt.Errorf("upload failed with result code: %d, message: %s", result.ResultCode, result.Msg) + } } - return nil default: return errs.NotImplement @@ -556,7 +856,7 @@ func (d *Yun139) Other(ctx context.Context, args model.OtherArgs) (interface{}, } switch args.Method { case "video_preview": - uri = "/hcy/videoPreview/getPreviewInfo" + uri = "/videoPreview/getPreviewInfo" default: return nil, errs.NotSupport } diff --git a/drivers/139/meta.go b/drivers/139/meta.go index 56a4c1df..c02b1347 100644 --- a/drivers/139/meta.go +++ b/drivers/139/meta.go @@ -9,8 +9,11 @@ type Addition struct { //Account string `json:"account" required:"true"` Authorization string `json:"authorization" type:"text" required:"true"` driver.RootID - Type string `json:"type" type:"select" options:"personal,family,personal_new" default:"personal"` - CloudID string `json:"cloud_id"` + Type string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"` + CloudID string `json:"cloud_id"` + CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"` + ReportRealSize bool `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"` + UseLargeThumbnail bool `json:"use_large_thumbnail" type:"bool" default:"false" help:"Enable to use large thumbnail for images"` } var config = driver.Config{ diff --git a/drivers/139/types.go b/drivers/139/types.go index f7971966..d5f025a1 100644 --- a/drivers/139/types.go +++ b/drivers/139/types.go @@ -7,6 +7,7 @@ import ( const ( MetaPersonal string = "personal" MetaFamily string = "family" + MetaGroup string = "group" MetaPersonalNew string = "personal_new" ) @@ -54,6 +55,7 @@ type Content struct { //ContentDesc string `json:"contentDesc"` //ContentType int `json:"contentType"` //ContentOrigin int `json:"contentOrigin"` + CreateTime string `json:"createTime"` UpdateTime string `json:"updateTime"` //CommentCount int `json:"commentCount"` ThumbnailURL string `json:"thumbnailURL"` @@ -141,6 +143,13 @@ type UploadResp struct { } `json:"data"` } +type InterLayerUploadResult struct { + XMLName xml.Name `xml:"result"` + Text string `xml:",chardata"` + ResultCode int `xml:"resultCode"` + Msg string `xml:"msg"` +} + type CloudContent struct { ContentID string `json:"contentID"` //Modifier string `json:"modifier"` @@ -196,6 +205,37 @@ type QueryContentListResp struct { } `json:"data"` } +type QueryGroupContentListResp struct { + BaseResp + Data struct { + Result struct { + ResultCode string `json:"resultCode"` + ResultDesc string `json:"resultDesc"` + } 
`json:"result"` + GetGroupContentResult struct { + ParentCatalogID string `json:"parentCatalogID"` // 根目录是"0" + CatalogList []struct { + Catalog + Path string `json:"path"` + } `json:"catalogList"` + ContentList []Content `json:"contentList"` + NodeCount int `json:"nodeCount"` // 文件+文件夹数量 + CtlgCnt int `json:"ctlgCnt"` // 文件夹数量 + ContCnt int `json:"contCnt"` // 文件数量 + } `json:"getGroupContentResult"` + } `json:"data"` +} + +type ParallelHashCtx struct { + PartOffset int64 `json:"partOffset"` +} + +type PartInfo struct { + PartNumber int64 `json:"partNumber"` + PartSize int64 `json:"partSize"` + ParallelHashCtx ParallelHashCtx `json:"parallelHashCtx"` +} + type PersonalThumbnail struct { Style string `json:"style"` Url string `json:"url"` @@ -228,6 +268,7 @@ type PersonalUploadResp struct { BaseResp Data struct { FileId string `json:"fileId"` + FileName string `json:"fileName"` PartInfos []PersonalPartInfo `json:"partInfos"` Exist bool `json:"exist"` RapidUpload bool `json:"rapidUpload"` @@ -235,11 +276,39 @@ type PersonalUploadResp struct { } } -type RefreshTokenResp struct { - XMLName xml.Name `xml:"root"` - Return string `xml:"return"` - Token string `xml:"token"` - Expiretime int32 `xml:"expiretime"` - AccessToken string `xml:"accessToken"` - Desc string `xml:"desc"` +type PersonalUploadUrlResp struct { + BaseResp + Data struct { + FileId string `json:"fileId"` + UploadId string `json:"uploadId"` + PartInfos []PersonalPartInfo `json:"partInfos"` + } +} + +type QueryRoutePolicyResp struct { + Success bool `json:"success"` + Code string `json:"code"` + Message string `json:"message"` + Data struct { + RoutePolicyList []struct { + SiteID string `json:"siteID"` + SiteCode string `json:"siteCode"` + ModName string `json:"modName"` + HttpUrl string `json:"httpUrl"` + HttpsUrl string `json:"httpsUrl"` + EnvID string `json:"envID"` + ExtInfo string `json:"extInfo"` + HashName string `json:"hashName"` + ModAddrType int `json:"modAddrType"` + } `json:"routePolicyList"` + } `json:"data"` +} + +type RefreshTokenResp struct { + XMLName xml.Name `xml:"root"` + Return string `xml:"return"` + Token string `xml:"token"` + Expiretime int32 `xml:"expiretime"` + AccessToken string `xml:"accessToken"` + Desc string `xml:"desc"` } diff --git a/drivers/139/util.go b/drivers/139/util.go index 5918e4c5..5adc39b4 100644 --- a/drivers/139/util.go +++ b/drivers/139/util.go @@ -6,6 +6,7 @@ import ( "fmt" "net/http" "net/url" + "path" "sort" "strconv" "strings" @@ -13,9 +14,9 @@ import ( "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils/random" - "github.com/alist-org/alist/v3/internal/op" "github.com/go-resty/resty/v2" jsoniter "github.com/json-iterator/go" log "github.com/sirupsen/logrus" @@ -54,14 +55,38 @@ func getTime(t string) time.Time { } func (d *Yun139) refreshToken() error { - url := "https://aas.caiyun.feixin.10086.cn:443/tellin/authTokenRefresh.do" - var resp RefreshTokenResp + if d.ref != nil { + return d.ref.refreshToken() + } decode, err := base64.StdEncoding.DecodeString(d.Authorization) if err != nil { - return err + return fmt.Errorf("authorization decode failed: %s", err) } decodeStr := string(decode) splits := strings.Split(decodeStr, ":") + if len(splits) < 3 { + return fmt.Errorf("authorization is invalid, splits < 3") + } + d.Account = splits[1] + strs := strings.Split(splits[2], "|") + if len(strs) < 4 { + return 
fmt.Errorf("authorization is invalid, strs < 4") + } + expiration, err := strconv.ParseInt(strs[3], 10, 64) + if err != nil { + return fmt.Errorf("authorization is invalid") + } + expiration -= time.Now().UnixMilli() + if expiration > 1000*60*60*24*15 { + // Authorization有效期大于15天无需刷新 + return nil + } + if expiration < 0 { + return fmt.Errorf("authorization has expired") + } + + url := "https://aas.caiyun.feixin.10086.cn:443/tellin/authTokenRefresh.do" + var resp RefreshTokenResp reqBody := "" + splits[2] + "" + splits[1] + "656" _, err = base.RestyClient.R(). ForceContentType("application/xml"). @@ -99,21 +124,22 @@ func (d *Yun139) request(pathname string, method string, callback base.ReqCallba req.SetHeaders(map[string]string{ "Accept": "application/json, text/plain, */*", "CMS-DEVICE": "default", - "Authorization": "Basic " + d.Authorization, + "Authorization": "Basic " + d.getAuthorization(), "mcloud-channel": "1000101", "mcloud-client": "10701", //"mcloud-route": "001", "mcloud-sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign), //"mcloud-skey":"", - "mcloud-version": "6.6.0", - "Origin": "https://yun.139.com", - "Referer": "https://yun.139.com/w/", - "x-DeviceInfo": "||9|6.6.0|chrome|95.0.4638.69|uwIy75obnsRPIwlJSd7D9GhUvFwG96ce||macos 10.15.2||zh-CN|||", - "x-huawei-channelSrc": "10000034", - "x-inner-ntwk": "2", - "x-m4c-caller": "PC", - "x-m4c-src": "10002", - "x-SvcType": svcType, + "mcloud-version": "7.14.0", + "Origin": "https://yun.139.com", + "Referer": "https://yun.139.com/w/", + "x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||", + "x-huawei-channelSrc": "10000034", + "x-inner-ntwk": "2", + "x-m4c-caller": "PC", + "x-m4c-src": "10002", + "x-SvcType": svcType, + "Inner-Hcy-Router-Https": "1", }) var e BaseResp @@ -131,6 +157,64 @@ func (d *Yun139) request(pathname string, method string, callback base.ReqCallba } return res.Body(), nil } + +func (d *Yun139) requestRoute(data interface{}, resp interface{}) ([]byte, error) { + url := "https://user-njs.yun.139.com/user/route/qryRoutePolicy" + req := base.RestyClient.R() + randStr := random.String(16) + ts := time.Now().Format("2006-01-02 15:04:05") + callback := func(req *resty.Request) { + req.SetBody(data) + } + if callback != nil { + callback(req) + } + body, err := utils.Json.Marshal(req.Body) + if err != nil { + return nil, err + } + sign := calSign(string(body), ts, randStr) + svcType := "1" + if d.isFamily() { + svcType = "2" + } + req.SetHeaders(map[string]string{ + "Accept": "application/json, text/plain, */*", + "CMS-DEVICE": "default", + "Authorization": "Basic " + d.getAuthorization(), + "mcloud-channel": "1000101", + "mcloud-client": "10701", + //"mcloud-route": "001", + "mcloud-sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign), + //"mcloud-skey":"", + "mcloud-version": "7.14.0", + "Origin": "https://yun.139.com", + "Referer": "https://yun.139.com/w/", + "x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||", + "x-huawei-channelSrc": "10000034", + "x-inner-ntwk": "2", + "x-m4c-caller": "PC", + "x-m4c-src": "10002", + "x-SvcType": svcType, + "Inner-Hcy-Router-Https": "1", + }) + + var e BaseResp + req.SetResult(&e) + res, err := req.Execute(http.MethodPost, url) + log.Debugln(res.String()) + if !e.Success { + return nil, errors.New(e.Message) + } + if resp != nil { + err = utils.Json.Unmarshal(res.Body(), resp) + if err != nil { + return nil, err + } + } + return res.Body(), nil +} + func (d *Yun139) post(pathname string, data interface{}, resp interface{}) ([]byte, error) { return 
d.request(pathname, http.MethodPost, func(req *resty.Request) { req.SetBody(data) @@ -151,7 +235,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) { "catalogSortType": 0, "contentSortType": 0, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -199,7 +283,7 @@ func (d *Yun139) newJson(data map[string]interface{}) base.Json { "cloudID": d.CloudID, "cloudType": 1, "commonAccountInfo": base.Json{ - "account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -220,10 +304,11 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) { "sortDirection": 1, }) var resp QueryContentListResp - _, err := d.post("/orchestration/familyCloud/content/v1.0/queryContentList", data, &resp) + _, err := d.post("/orchestration/familyCloud-rebuild/content/v1.2/queryContentList", data, &resp) if err != nil { return nil, err } + path := resp.Data.Path for _, catalog := range resp.Data.CloudCatalogList { f := model.Object{ ID: catalog.CatalogID, @@ -232,6 +317,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) { IsFolder: true, Modified: getTime(catalog.LastUpdateTime), Ctime: getTime(catalog.CreateTime), + Path: path, // 文件夹上一级的Path } files = append(files, &f) } @@ -243,13 +329,14 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) { Size: content.ContentSize, Modified: getTime(content.LastUpdateTime), Ctime: getTime(content.CreateTime), + Path: path, // 文件所在目录的Path }, Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL}, //Thumbnail: content.BigthumbnailURL, } files = append(files, &f) } - if 100*pageNum > resp.Data.TotalCount { + if resp.Data.TotalCount == 0 { break } pageNum++ @@ -257,12 +344,67 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) { return files, nil } +func (d *Yun139) groupGetFiles(catalogID string) ([]model.Obj, error) { + pageNum := 1 + files := make([]model.Obj, 0) + for { + data := d.newJson(base.Json{ + "groupID": d.CloudID, + "catalogID": path.Base(catalogID), + "contentSortType": 0, + "sortDirection": 1, + "startNumber": pageNum, + "endNumber": pageNum + 99, + "path": path.Join(d.RootFolderID, catalogID), + }) + + var resp QueryGroupContentListResp + _, err := d.post("/orchestration/group-rebuild/content/v1.0/queryGroupContentList", data, &resp) + if err != nil { + return nil, err + } + path := resp.Data.GetGroupContentResult.ParentCatalogID + for _, catalog := range resp.Data.GetGroupContentResult.CatalogList { + f := model.Object{ + ID: catalog.CatalogID, + Name: catalog.CatalogName, + Size: 0, + IsFolder: true, + Modified: getTime(catalog.UpdateTime), + Ctime: getTime(catalog.CreateTime), + Path: catalog.Path, // 文件夹的真实Path, root:/开头 + } + files = append(files, &f) + } + for _, content := range resp.Data.GetGroupContentResult.ContentList { + f := model.ObjThumb{ + Object: model.Object{ + ID: content.ContentID, + Name: content.ContentName, + Size: content.ContentSize, + Modified: getTime(content.UpdateTime), + Ctime: getTime(content.CreateTime), + Path: path, // 文件所在目录的Path + }, + Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL}, + //Thumbnail: content.BigthumbnailURL, + } + files = append(files, &f) + } + if (pageNum + 99) > resp.Data.GetGroupContentResult.NodeCount { + break + } + pageNum = pageNum + 100 + } + return files, nil +} + func (d *Yun139) getLink(contentId string) (string, error) { data := base.Json{ "appName": "", "contentID": contentId, "commonAccountInfo": base.Json{ - 
"account": d.Account, + "account": d.getAccount(), "accountType": 1, }, } @@ -273,6 +415,32 @@ func (d *Yun139) getLink(contentId string) (string, error) { } return jsoniter.Get(res, "data", "downloadURL").ToString(), nil } +func (d *Yun139) familyGetLink(contentId string, path string) (string, error) { + data := d.newJson(base.Json{ + "contentID": contentId, + "path": path, + }) + res, err := d.post("/orchestration/familyCloud-rebuild/content/v1.0/getFileDownLoadURL", + data, nil) + if err != nil { + return "", err + } + return jsoniter.Get(res, "data", "downloadURL").ToString(), nil +} + +func (d *Yun139) groupGetLink(contentId string, path string) (string, error) { + data := d.newJson(base.Json{ + "contentID": contentId, + "groupID": d.CloudID, + "path": path, + }) + res, err := d.post("/orchestration/group-rebuild/groupManage/v1.0/getGroupFileDownLoadURL", + data, nil) + if err != nil { + return "", err + } + return jsoniter.Get(res, "data", "downloadURL").ToString(), nil +} func unicode(str string) string { textQuoted := strconv.QuoteToASCII(str) @@ -281,7 +449,7 @@ func unicode(str string) string { } func (d *Yun139) personalRequest(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { - url := "https://personal-kd-njs.yun.139.com" + pathname + url := d.getPersonalCloudHost() + pathname req := base.RestyClient.R() randStr := random.String(16) ts := time.Now().Format("2006-01-02 15:04:05") @@ -299,17 +467,15 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R } req.SetHeaders(map[string]string{ "Accept": "application/json, text/plain, */*", - "Authorization": "Basic " + d.Authorization, + "Authorization": "Basic " + d.getAuthorization(), "Caller": "web", "Cms-Device": "default", "Mcloud-Channel": "1000101", "Mcloud-Client": "10701", "Mcloud-Route": "001", "Mcloud-Sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign), - "Mcloud-Version": "7.13.0", - "Origin": "https://yun.139.com", - "Referer": "https://yun.139.com/w/", - "x-DeviceInfo": "||9|7.13.0|chrome|120.0.0.0|||windows 10||zh-CN|||", + "Mcloud-Version": "7.14.0", + "x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||", "x-huawei-channelSrc": "10000034", "x-inner-ntwk": "2", "x-m4c-caller": "PC", @@ -318,7 +484,7 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R "X-Yun-Api-Version": "v1", "X-Yun-App-Channel": "10000034", "X-Yun-Channel-Source": "10000034", - "X-Yun-Client-Info": "||9|7.13.0|chrome|120.0.0.0|||windows 10||zh-CN|||dW5kZWZpbmVk||", + "X-Yun-Client-Info": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||dW5kZWZpbmVk||", "X-Yun-Module-Type": "100", "X-Yun-Svc-Type": "1", }) @@ -370,7 +536,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) { "parentFileId": fileId, } var resp PersonalListResp - _, err := d.personalPost("/hcy/file/list", data, &resp) + _, err := d.personalPost("/file/list", data, &resp) if err != nil { return nil, err } @@ -390,7 +556,15 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) { } else { var Thumbnails = item.Thumbnails var ThumbnailUrl string - if len(Thumbnails) > 0 { + if d.UseLargeThumbnail { + for _, thumb := range Thumbnails { + if strings.Contains(thumb.Style, "Large") { + ThumbnailUrl = thumb.Url + break + } + } + } + if ThumbnailUrl == "" && len(Thumbnails) > 0 { ThumbnailUrl = Thumbnails[len(Thumbnails)-1].Url } f = &model.ObjThumb{ @@ -418,7 +592,7 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) { 
data := base.Json{ "fileId": fileId, } - res, err := d.personalPost("/hcy/file/getDownloadUrl", + res, err := d.personalPost("/file/getDownloadUrl", data, nil) if err != nil { return "", err @@ -430,3 +604,22 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) { return jsoniter.Get(res, "data", "url").ToString(), nil } } + +func (d *Yun139) getAuthorization() string { + if d.ref != nil { + return d.ref.getAuthorization() + } + return d.Authorization +} +func (d *Yun139) getAccount() string { + if d.ref != nil { + return d.ref.getAccount() + } + return d.Account +} +func (d *Yun139) getPersonalCloudHost() string { + if d.ref != nil { + return d.ref.getPersonalCloudHost() + } + return d.PersonalCloudHost +} diff --git a/drivers/189/util.go b/drivers/189/util.go index 0b4c0633..16a5aa39 100644 --- a/drivers/189/util.go +++ b/drivers/189/util.go @@ -365,7 +365,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F log.Debugf("uploadData: %+v", uploadData) requestURL := uploadData.RequestURL uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&") - req, err := http.NewRequest(http.MethodPut, requestURL, bytes.NewReader(byteData)) + req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) if err != nil { return err } @@ -375,11 +375,11 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F req.Header.Set(v[0:i], v[i+1:]) } r, err := base.HttpClient.Do(req) - log.Debugf("%+v %+v", r, r.Request.Header) - r.Body.Close() if err != nil { return err } + log.Debugf("%+v %+v", r, r.Request.Header) + _ = r.Body.Close() up(float64(i) * 100 / float64(count)) } fileMd5 := hex.EncodeToString(md5Sum.Sum(nil)) diff --git a/drivers/189pc/driver.go b/drivers/189pc/driver.go index 9c01a50f..c91caf2f 100644 --- a/drivers/189pc/driver.go +++ b/drivers/189pc/driver.go @@ -1,8 +1,8 @@ package _189pc import ( - "container/ring" "context" + "fmt" "net/http" "strconv" "strings" @@ -14,6 +14,7 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" + "github.com/google/uuid" ) type Cloud189PC struct { @@ -29,10 +30,11 @@ type Cloud189PC struct { uploadThread int - familyTransferFolder *ring.Ring + familyTransferFolder *Cloud189Folder cleanFamilyTransferFile func() storageConfig driver.Config + ref *Cloud189PC } func (y *Cloud189PC) Config() driver.Config { @@ -47,9 +49,18 @@ func (y *Cloud189PC) GetAddition() driver.Additional { } func (y *Cloud189PC) Init(ctx context.Context) (err error) { - // 兼容旧上传接口 - y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || y.Addition.UploadMethod == "old") - + y.storageConfig = config + if y.isFamily() { + // 兼容旧上传接口 + if y.Addition.RapidUpload || y.Addition.UploadMethod == "old" { + y.storageConfig.NoOverwriteUpload = true + } + } else { + // 家庭云转存,不支持覆盖上传 + if y.Addition.FamilyTransfer { + y.storageConfig.NoOverwriteUpload = true + } + } // 处理个人云和家庭云参数 if y.isFamily() && y.RootFolderID == "-11" { y.RootFolderID = "" @@ -64,20 +75,22 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) { y.uploadThread, y.UploadThread = 3, "3" } - // 初始化请求客户端 - if y.client == nil { - y.client = base.NewRestyClient().SetHeaders(map[string]string{ - "Accept": "application/json;charset=UTF-8", - "Referer": WEB_URL, - }) - } + if y.ref == nil { + // 初始化请求客户端 + if y.client == nil { + y.client = 
base.NewRestyClient().SetHeaders(map[string]string{ + "Accept": "application/json;charset=UTF-8", + "Referer": WEB_URL, + }) + } - // 避免重复登陆 (avoid logging in repeatedly) - identity := utils.GetMD5EncodeStr(y.Username + y.Password) - if !y.isLogin() || y.identity != identity { - y.identity = identity - if err = y.login(); err != nil { - return + // avoid logging in repeatedly + identity := utils.GetMD5EncodeStr(y.Username + y.Password) + if !y.isLogin() || y.identity != identity { + y.identity = identity + if err = y.login(); err != nil { + return + } } } @@ -88,13 +101,14 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) { } } - // create transfer folders to avoid duplicate file names + // create the transfer folder if y.FamilyTransfer { - if y.familyTransferFolder, err = y.createFamilyTransferFolder(32); err != nil { + if err := y.createFamilyTransferFolder(); err != nil { return err } } + // throttle cleanup of transferred files y.cleanFamilyTransferFile = utils.NewThrottle2(time.Minute, func() { if err := y.cleanFamilyTransfer(context.TODO()); err != nil { utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err) @@ -103,7 +117,17 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) { return } +func (d *Cloud189PC) InitReference(storage driver.Driver) error { + refStorage, ok := storage.(*Cloud189PC) + if ok { + d.ref = refStorage + return nil + } + return errs.NotSupport +} + func (y *Cloud189PC) Drop(ctx context.Context) error { + y.ref = nil return nil } @@ -314,35 +338,49 @@ func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.Fil if !isFamily && y.FamilyTransfer { // redirect the upload target to the family cloud folder transferDstDir := dstDir - dstDir = (y.familyTransferFolder.Value).(*Cloud189Folder) - y.familyTransferFolder = y.familyTransferFolder.Next() + dstDir = y.familyTransferFolder + // use a temporary file name + srcName := stream.GetName() + stream = &WrapFileStreamer{ + FileStreamer: stream, + Name: fmt.Sprintf("0%s.transfer", uuid.NewString()), + } + + // upload via the family cloud isFamily = true overwrite = false defer func() { if newObj != nil { - // batch tasks occasionally fail to delete - y.cleanFamilyTransferFile() - // save the family-cloud file to the personal cloud err = y.SaveFamilyFileToPersonCloud(context.TODO(), y.FamilyID, newObj, transferDstDir, true) - - task := BatchTaskInfo{ - FileId: newObj.GetID(), - FileName: newObj.GetName(), - IsFolder: BoolToNumber(newObj.IsDir()), + // delete the family-cloud source file + go y.Delete(context.TODO(), y.FamilyID, newObj) + // batch tasks occasionally fail to delete + go y.cleanFamilyTransferFile() + // return the error if the transfer failed + if err != nil { + return } - // delete the source file - if resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, task); err == nil { - y.WaitBatchTask("DELETE", resp.TaskID, time.Second) - // permanently delete - if resp, err := y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, task); err == nil { - y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second) + // look up the transferred file + var file *Cloud189File + file, err = y.findFileByName(context.TODO(), newObj.GetName(), transferDstDir.GetID(), false) + if err != nil { + if err == errs.ObjectNotFound { + err = fmt.Errorf("unknown error: No transfer file obtained %s", newObj.GetName()) } + return } - newObj = nil + + // rename the transferred file + newObj, err = y.Rename(context.TODO(), file, srcName) + if err != nil { + // delete the source file if the rename failed + _ = y.Delete(context.TODO(), "", file) + } + return } }() } diff --git a/drivers/189pc/help.go b/drivers/189pc/help.go index 49f957fa..bac8880a 100644 --- a/drivers/189pc/help.go +++ b/drivers/189pc/help.go @@ -18,6 +18,7 @@ import ( "strings" "time" + "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils/random" ) @@ -208,3 +209,12 @@ func IF[V any](o bool, t V, f V) V { } return f } + +type WrapFileStreamer struct { 
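// WrapFileStreamer (fields follow) embeds the original model.FileStreamer and
// overrides only GetName(), so the family-transfer path in Put above can upload
// the stream under a throwaway "0<uuid>.transfer" name while leaving the data
// and every other method untouched.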
+ model.FileStreamer + Name string +} + +func (w *WrapFileStreamer) GetName() string { + return w.Name +} diff --git a/drivers/189pc/utils.go b/drivers/189pc/utils.go index f5a44455..c391f7e6 100644 --- a/drivers/189pc/utils.go +++ b/drivers/189pc/utils.go @@ -2,30 +2,32 @@ package _189pc import ( "bytes" - "container/ring" "context" - "crypto/md5" "encoding/base64" "encoding/hex" "encoding/xml" "fmt" "io" - "math" "net/http" "net/http/cookiejar" "net/url" + "os" "regexp" "sort" "strconv" "strings" "time" + "golang.org/x/sync/semaphore" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/errgroup" "github.com/alist-org/alist/v3/pkg/utils" @@ -57,11 +59,11 @@ const ( func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string { dateOfGmt := getHttpDateStr() - sessionKey := y.tokenInfo.SessionKey - sessionSecret := y.tokenInfo.SessionSecret + sessionKey := y.getTokenInfo().SessionKey + sessionSecret := y.getTokenInfo().SessionSecret if isFamily { - sessionKey = y.tokenInfo.FamilySessionKey - sessionSecret = y.tokenInfo.FamilySessionSecret + sessionKey = y.getTokenInfo().FamilySessionKey + sessionSecret = y.getTokenInfo().FamilySessionSecret } header := map[string]string{ @@ -74,9 +76,9 @@ func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) } func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string { - sessionSecret := y.tokenInfo.SessionSecret + sessionSecret := y.getTokenInfo().SessionSecret if isFamily { - sessionSecret = y.tokenInfo.FamilySessionSecret + sessionSecret = y.getTokenInfo().FamilySessionSecret } if params != nil { return AesECBEncrypt(params.Encode(), sessionSecret[:16]) @@ -85,7 +87,7 @@ func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string { } func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}, isFamily ...bool) ([]byte, error) { - req := y.client.R().SetQueryParams(clientSuffix()) + req := y.getClient().R().SetQueryParams(clientSuffix()) // 设置params paramsData := y.EncryptParams(params, isBool(isFamily...)) @@ -174,8 +176,8 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str } var erron RespErr - jsoniter.Unmarshal(body, &erron) - xml.Unmarshal(body, &erron) + _ = jsoniter.Unmarshal(body, &erron) + _ = xml.Unmarshal(body, &erron) if erron.HasError() { return nil, &erron } @@ -185,39 +187,9 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str return body, nil } func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) { - fullUrl := API_URL - if isFamily { - fullUrl += "/family/file" - } - fullUrl += "/listFiles.action" - - res := make([]model.Obj, 0, 130) + res := make([]model.Obj, 0, 100) for pageNum := 1; ; pageNum++ { - var resp Cloud189FilesResp - _, err := y.get(fullUrl, func(r *resty.Request) { - r.SetContext(ctx) - r.SetQueryParams(map[string]string{ - "folderId": fileId, - "fileType": "0", - "mediaAttr": "0", - "iconOption": "5", - "pageNum": fmt.Sprint(pageNum), - "pageSize": "130", - }) - if isFamily { - r.SetQueryParams(map[string]string{ - 
"familyId": y.FamilyID, - "orderBy": toFamilyOrderBy(y.OrderBy), - "descending": toDesc(y.OrderDirection), - }) - } else { - r.SetQueryParams(map[string]string{ - "recursive": "0", - "orderBy": y.OrderBy, - "descending": toDesc(y.OrderDirection), - }) - } - }, &resp, isFamily) + resp, err := y.getFilesWithPage(ctx, fileId, isFamily, pageNum, 1000, y.OrderBy, y.OrderDirection) if err != nil { return nil, err } @@ -236,6 +208,63 @@ func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) return res, nil } +func (y *Cloud189PC) getFilesWithPage(ctx context.Context, fileId string, isFamily bool, pageNum int, pageSize int, orderBy string, orderDirection string) (*Cloud189FilesResp, error) { + fullUrl := API_URL + if isFamily { + fullUrl += "/family/file" + } + fullUrl += "/listFiles.action" + + var resp Cloud189FilesResp + _, err := y.get(fullUrl, func(r *resty.Request) { + r.SetContext(ctx) + r.SetQueryParams(map[string]string{ + "folderId": fileId, + "fileType": "0", + "mediaAttr": "0", + "iconOption": "5", + "pageNum": fmt.Sprint(pageNum), + "pageSize": fmt.Sprint(pageSize), + }) + if isFamily { + r.SetQueryParams(map[string]string{ + "familyId": y.FamilyID, + "orderBy": toFamilyOrderBy(orderBy), + "descending": toDesc(orderDirection), + }) + } else { + r.SetQueryParams(map[string]string{ + "recursive": "0", + "orderBy": orderBy, + "descending": toDesc(orderDirection), + }) + } + }, &resp, isFamily) + if err != nil { + return nil, err + } + return &resp, nil +} + +func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, folderId string, isFamily bool) (*Cloud189File, error) { + for pageNum := 1; ; pageNum++ { + resp, err := y.getFilesWithPage(ctx, folderId, isFamily, pageNum, 10, "filename", "asc") + if err != nil { + return nil, err + } + // 获取完毕跳出 + if resp.FileListAO.Count == 0 { + return nil, errs.ObjectNotFound + } + for i := 0; i < len(resp.FileListAO.FileList); i++ { + file := resp.FileListAO.FileList[i] + if file.Name == searchName { + return &file, nil + } + } + } +} + func (y *Cloud189PC) login() (err error) { // 初始化登陆所需参数 if y.loginParam == nil { @@ -403,6 +432,9 @@ func (y *Cloud189PC) initLoginParam() error { // 刷新会话 func (y *Cloud189PC) refreshSession() (err error) { + if y.ref != nil { + return y.ref.refreshSession() + } var erron RespErr var userSessionResp UserSessionResp _, err = y.client.R(). 
@@ -441,12 +473,8 @@ func (y *Cloud189PC) refreshSession() (err error) { // 普通上传 // 无法上传大小为0的文件 func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) { - var sliceSize = partSize(file.GetSize()) - count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize))) - lastPartSize := file.GetSize() % sliceSize - if file.GetSize() > 0 && lastPartSize == 0 { - lastPartSize = sliceSize - } + size := file.GetSize() + sliceSize := partSize(size) params := Params{ "parentFolderId": dstDir.GetID(), @@ -478,24 +506,32 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo retry.Attempts(3), retry.Delay(time.Second), retry.DelayType(retry.BackOffDelay)) + sem := semaphore.NewWeighted(3) - fileMd5 := md5.New() - silceMd5 := md5.New() + count := int(size / sliceSize) + lastPartSize := size % sliceSize + if lastPartSize > 0 { + count++ + } else { + lastPartSize = sliceSize + } + fileMd5 := utils.MD5.NewFunc() + silceMd5 := utils.MD5.NewFunc() silceMd5Hexs := make([]string, 0, count) - + teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)) + byteSize := sliceSize for i := 1; i <= count; i++ { if utils.IsCanceled(upCtx) { break } - - byteData := make([]byte, sliceSize) if i == count { - byteData = byteData[:lastPartSize] + byteSize = lastPartSize } - + byteData := make([]byte, byteSize) // 读取块 silceMd5.Reset() - if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil { + if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil { + sem.Release(1) return nil, err } @@ -505,6 +541,10 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes)) threadG.Go(func(ctx context.Context) error { + if err = sem.Acquire(ctx, 1); err != nil { + return err + } + defer sem.Release(1) uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo) if err != nil { return err @@ -512,7 +552,8 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo // step.4 上传切片 uploadUrl := uploadUrls[0] - _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData), isFamily) + _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, + driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily) if err != nil { return err } @@ -569,24 +610,43 @@ func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream m // 快传 func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) { - tempFile, err := file.CacheFullInTempFile() - if err != nil { - return nil, err + var ( + cache = file.GetFile() + tmpF *os.File + err error + ) + size := file.GetSize() + if _, ok := cache.(io.ReaderAt); !ok && size > 0 { + tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*") + if err != nil { + return nil, err + } + defer func() { + _ = tmpF.Close() + _ = os.Remove(tmpF.Name()) + }() + cache = tmpF } - - var sliceSize = partSize(file.GetSize()) - count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize))) - lastSliceSize := file.GetSize() % sliceSize - if file.GetSize() > 0 && lastSliceSize == 0 { + sliceSize := partSize(size) + count := int(size / sliceSize) + 
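// Part-count arithmetic used by both upload paths: size/sliceSize full parts,
// plus one short trailing part when size%sliceSize is non-zero (a zero
// remainder means the final part is simply a full slice). The per-part MD5s
// gathered along the way are later joined with "\n" and hashed once more to
// produce the sliceMd5Hex consulted for rapid upload.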
lastSliceSize := size % sliceSize + if lastSliceSize > 0 { + count++ + } else { lastSliceSize = sliceSize } //step.1 优先计算所需信息 byteSize := sliceSize - fileMd5 := md5.New() - silceMd5 := md5.New() - silceMd5Hexs := make([]string, 0, count) + fileMd5 := utils.MD5.NewFunc() + sliceMd5 := utils.MD5.NewFunc() + sliceMd5Hexs := make([]string, 0, count) partInfos := make([]string, 0, count) + writers := []io.Writer{fileMd5, sliceMd5} + if tmpF != nil { + writers = append(writers, tmpF) + } + written := int64(0) for i := 1; i <= count; i++ { if utils.IsCanceled(ctx) { return nil, ctx.Err() @@ -596,19 +656,31 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode byteSize = lastSliceSize } - silceMd5.Reset() - if _, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF { + n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), file, byteSize) + written += n + if err != nil && err != io.EOF { return nil, err } - md5Byte := silceMd5.Sum(nil) - silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte))) + md5Byte := sliceMd5.Sum(nil) + sliceMd5Hexs = append(sliceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte))) partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte))) + sliceMd5.Reset() + } + + if tmpF != nil { + if size > 0 && written != size { + return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, size) + } + _, err = tmpF.Seek(0, io.SeekStart) + if err != nil { + return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ") + } } fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil))) sliceMd5Hex := fileMd5Hex - if file.GetSize() > sliceSize { - sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n"))) + if size > sliceSize { + sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(sliceMd5Hexs, "\n"))) } fullUrl := UPLOAD_URL @@ -620,7 +692,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode } // 尝试恢复进度 - uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.tokenInfo.SessionKey, fileMd5Hex) + uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.getTokenInfo().SessionKey, fileMd5Hex) if !ok { //step.2 预上传 params := Params{ @@ -674,7 +746,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode } // step.4 上传切片 - _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize), isFamily) + _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily) if err != nil { return err } @@ -687,7 +759,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode if err = threadG.Wait(); err != nil { if errors.Is(err, context.Canceled) { uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" }) - base.SaveUploadProgress(y, uploadProgress, y.tokenInfo.SessionKey, fileMd5Hex) + base.SaveUploadProgress(y, uploadProgress, y.getTokenInfo().SessionKey, fileMd5Hex) } return nil, err } @@ -756,14 +828,11 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo // 旧版本上传,家庭云不支持覆盖 func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, 
error) { - tempFile, err := file.CacheFullInTempFile() - if err != nil { - return nil, err - } - fileMd5, err := utils.HashFile(utils.MD5, tempFile) + tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, utils.MD5) if err != nil { return nil, err } + rateLimited := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile)) // 创建上传会话 uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily) @@ -790,7 +859,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId) } - _, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily) + _, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimited, isFamily) if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" { return nil, err } @@ -899,8 +968,7 @@ func (y *Cloud189PC) isLogin() bool { } // 创建家庭云中转文件夹 -func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) { - folders := ring.New(count) +func (y *Cloud189PC) createFamilyTransferFolder() error { var rootFolder Cloud189Folder _, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) { req.SetQueryParams(map[string]string{ @@ -909,81 +977,61 @@ func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) { }) }, &rootFolder, true) if err != nil { - return nil, err + return err } - - folderCount := 0 - - // 获取已有目录 - files, err := y.getFiles(context.TODO(), rootFolder.GetID(), true) - if err != nil { - return nil, err - } - for _, file := range files { - if folder, ok := file.(*Cloud189Folder); ok { - folders.Value = folder - folders = folders.Next() - folderCount++ - } - } - - // 创建新的目录 - for folderCount < count { - var newFolder Cloud189Folder - _, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) { - req.SetQueryParams(map[string]string{ - "folderName": uuid.NewString(), - "familyId": y.FamilyID, - "parentId": rootFolder.GetID(), - }) - }, &newFolder, true) - if err != nil { - return nil, err - } - folders.Value = &newFolder - folders = folders.Next() - folderCount++ - } - return folders, nil + y.familyTransferFolder = &rootFolder + return nil } // 清理中转文件夹 func (y *Cloud189PC) cleanFamilyTransfer(ctx context.Context) error { - var tasks []BatchTaskInfo - r := y.familyTransferFolder - for p := r.Next(); p != r; p = p.Next() { - folder := p.Value.(*Cloud189Folder) - - files, err := y.getFiles(ctx, folder.GetID(), true) + transferFolderId := y.familyTransferFolder.GetID() + for pageNum := 1; ; pageNum++ { + resp, err := y.getFilesWithPage(ctx, transferFolderId, true, pageNum, 100, "lastOpTime", "asc") if err != nil { return err } - for _, file := range files { + // 获取完毕跳出 + if resp.FileListAO.Count == 0 { + break + } + + var tasks []BatchTaskInfo + for i := 0; i < len(resp.FileListAO.FolderList); i++ { + folder := resp.FileListAO.FolderList[i] + tasks = append(tasks, BatchTaskInfo{ + FileId: folder.GetID(), + FileName: folder.GetName(), + IsFolder: BoolToNumber(folder.IsDir()), + }) + } + for i := 0; i < len(resp.FileListAO.FileList); i++ { + file := resp.FileListAO.FileList[i] tasks = append(tasks, BatchTaskInfo{ FileId: file.GetID(), FileName: file.GetName(), IsFolder: BoolToNumber(file.IsDir()), }) } - } - if len(tasks) > 0 { - // 删除 - resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...) 
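// The rewrite below drops the pre-built folder ring: cleanFamilyTransfer now
// pages through the single transfer folder 100 entries at a time and submits
// one DELETE plus one CLEAR_RECYCLE batch task per page.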
- if err != nil { + if len(tasks) > 0 { + // delete + resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...) + if err != nil { + return err + } + err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second) + if err != nil { + return err + } + // permanently delete + resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...) + if err != nil { + return err + } + err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second) + return err } - err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second) - if err != nil { - return err - } - // permanently delete - resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...) - if err != nil { - return err - } - err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second) - return err } return nil } @@ -1008,7 +1056,7 @@ func (y *Cloud189PC) getFamilyID() (string, error) { return "", fmt.Errorf("cannot get automatically, please input family_id") } for _, info := range infos { - if strings.Contains(y.tokenInfo.LoginName, info.RemarkName) { + if strings.Contains(y.getTokenInfo().LoginName, info.RemarkName) { return fmt.Sprint(info.FamilyID), nil } } @@ -1060,6 +1108,34 @@ func (y *Cloud189PC) SaveFamilyFileToPersonCloud(ctx context.Context, familyId s } } +// Permanently delete a file +func (y *Cloud189PC) Delete(ctx context.Context, familyId string, srcObj model.Obj) error { + task := BatchTaskInfo{ + FileId: srcObj.GetID(), + FileName: srcObj.GetName(), + IsFolder: BoolToNumber(srcObj.IsDir()), + } + // delete the source file + resp, err := y.CreateBatchTask("DELETE", familyId, "", nil, task) + if err != nil { + return err + } + err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second) + if err != nil { + return err + } + // empty the recycle bin + resp, err = y.CreateBatchTask("CLEAR_RECYCLE", familyId, "", nil, task) + if err != nil { + return err + } + err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second) + if err != nil { + return err + } + return nil +} + func (y *Cloud189PC) CreateBatchTask(aType string, familyID string, targetFolderId string, other map[string]string, taskInfos ...BatchTaskInfo) (*CreateBatchTaskResp, error) { var resp CreateBatchTaskResp _, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) { @@ -1142,3 +1218,17 @@ func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration) time.Sleep(t) } } + +func (y *Cloud189PC) getTokenInfo() *AppSessionResp { + if y.ref != nil { + return y.ref.getTokenInfo() + } + return y.tokenInfo +} + +func (y *Cloud189PC) getClient() *resty.Client { + if y.ref != nil { + return y.ref.getClient() + } + return y.client +} diff --git a/drivers/alias/driver.go b/drivers/alias/driver.go index 1b439a2c..e292a628 100644 --- a/drivers/alias/driver.go +++ b/drivers/alias/driver.go @@ -3,6 +3,7 @@ package alias import ( "context" "errors" + stdpath "path" "strings" "github.com/alist-org/alist/v3/internal/driver" @@ -110,14 +111,62 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( for _, dst := range dsts { link, err := d.link(ctx, dst, sub, args) if err == nil { + if !args.Redirect && len(link.URL) > 0 { + // Normally, concurrent downloads are only supported by drivers that return a URL + // Nesting an alias inside an alias lets drivers that return no URL (crypt, mega, etc.) support concurrency as well + if d.DownloadConcurrency > 0 { + link.Concurrency = d.DownloadConcurrency + } + if d.DownloadPartSize > 0 { + link.PartSize = d.DownloadPartSize * utils.KB + } + } return link, nil } } return nil, errs.ObjectNotFound } +func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + if !d.Writable { + return errs.PermissionDenied + } + 
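// Every mutating alias handler added or reworked below (MakeDir, Move, Rename,
// Copy, Remove, Put, PutURL, ArchiveDecompress) follows the same shape: refuse
// with errs.PermissionDenied unless the mount is marked Writable, resolve the
// concrete path via getReqPath, then delegate to the fs layer.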
reqPath, err := d.getReqPath(ctx, parentDir, true) + if err == nil { + return fs.MakeDir(ctx, stdpath.Join(*reqPath, dirName)) + } + if errs.IsNotImplement(err) { + return errors.New("same-name dirs cannot make sub-dir") + } + return err +} + +func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + if !d.Writable { + return errs.PermissionDenied + } + srcPath, err := d.getReqPath(ctx, srcObj, false) + if errs.IsNotImplement(err) { + return errors.New("same-name files cannot be moved") + } + if err != nil { + return err + } + dstPath, err := d.getReqPath(ctx, dstDir, true) + if errs.IsNotImplement(err) { + return errors.New("same-name dirs cannot be moved to") + } + if err != nil { + return err + } + return fs.Move(ctx, *srcPath, *dstPath) +} + func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) error { - reqPath, err := d.getReqPath(ctx, srcObj) + if !d.Writable { + return errs.PermissionDenied + } + reqPath, err := d.getReqPath(ctx, srcObj, false) if err == nil { return fs.Rename(ctx, *reqPath, newName) } @@ -127,8 +176,33 @@ func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) er return err } +func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + if !d.Writable { + return errs.PermissionDenied + } + srcPath, err := d.getReqPath(ctx, srcObj, false) + if errs.IsNotImplement(err) { + return errors.New("same-name files cannot be copied") + } + if err != nil { + return err + } + dstPath, err := d.getReqPath(ctx, dstDir, true) + if errs.IsNotImplement(err) { + return errors.New("same-name dirs cannot be copied to") + } + if err != nil { + return err + } + _, err = fs.Copy(ctx, *srcPath, *dstPath) + return err +} + func (d *Alias) Remove(ctx context.Context, obj model.Obj) error { - reqPath, err := d.getReqPath(ctx, obj) + if !d.Writable { + return errs.PermissionDenied + } + reqPath, err := d.getReqPath(ctx, obj, false) if err == nil { return fs.Remove(ctx, *reqPath) } @@ -138,4 +212,110 @@ func (d *Alias) Remove(ctx context.Context, obj model.Obj) error { return err } +func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { + if !d.Writable { + return errs.PermissionDenied + } + reqPath, err := d.getReqPath(ctx, dstDir, true) + if err == nil { + return fs.PutDirectly(ctx, *reqPath, s) + } + if errs.IsNotImplement(err) { + return errors.New("same-name dirs cannot be Put") + } + return err +} + +func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string) error { + if !d.Writable { + return errs.PermissionDenied + } + reqPath, err := d.getReqPath(ctx, dstDir, true) + if err == nil { + return fs.PutURL(ctx, *reqPath, name, url) + } + if errs.IsNotImplement(err) { + return errors.New("same-name files cannot offline download") + } + return err +} + +func (d *Alias) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + root, sub := d.getRootAndPath(obj.GetPath()) + dsts, ok := d.pathMap[root] + if !ok { + return nil, errs.ObjectNotFound + } + for _, dst := range dsts { + meta, err := d.getArchiveMeta(ctx, dst, sub, args) + if err == nil { + return meta, nil + } + } + return nil, errs.NotImplement +} + +func (d *Alias) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { + root, sub := d.getRootAndPath(obj.GetPath()) + dsts, ok := d.pathMap[root] + if !ok { + return nil, errs.ObjectNotFound + } + for _, dst := range dsts { + l, 
err := d.listArchive(ctx, dst, sub, args) + if err == nil { + return l, nil + } + } + return nil, errs.NotImplement +} + +func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + // Of the two drivers behind an alias, one may support driver-side extraction and the other may not; how do we keep them compatible? + // If the archive lives in a driver without driver-side extraction, GetArchiveMeta returns errs.NotImplement, the extraction URL gets the /ae prefix, and Extract is never called + // If the archive lives in a driver with driver-side extraction, GetArchiveMeta returns a valid result, the extraction URL gets the /ad prefix, and Extract is called + root, sub := d.getRootAndPath(obj.GetPath()) + dsts, ok := d.pathMap[root] + if !ok { + return nil, errs.ObjectNotFound + } + for _, dst := range dsts { + link, err := d.extract(ctx, dst, sub, args) + if err == nil { + if !args.Redirect && len(link.URL) > 0 { + if d.DownloadConcurrency > 0 { + link.Concurrency = d.DownloadConcurrency + } + if d.DownloadPartSize > 0 { + link.PartSize = d.DownloadPartSize * utils.KB + } + } + return link, nil + } + } + return nil, errs.NotImplement +} + +func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error { + if !d.Writable { + return errs.PermissionDenied + } + srcPath, err := d.getReqPath(ctx, srcObj, false) + if errs.IsNotImplement(err) { + return errors.New("same-name files cannot be decompressed") + } + if err != nil { + return err + } + dstPath, err := d.getReqPath(ctx, dstDir, true) + if errs.IsNotImplement(err) { + return errors.New("same-name dirs cannot be decompressed to") + } + if err != nil { + return err + } + _, err = fs.ArchiveDecompress(ctx, *srcPath, *dstPath, args) + return err +} + var _ driver.Driver = (*Alias)(nil) diff --git a/drivers/alias/meta.go b/drivers/alias/meta.go index 45b88575..70dc59f0 100644 --- a/drivers/alias/meta.go +++ b/drivers/alias/meta.go @@ -9,15 +9,18 @@ type Addition struct { // Usually one of two // driver.RootPath // define other - Paths string `json:"paths" required:"true" type:"text"` - ProtectSameName bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"` + Paths string `json:"paths" required:"true" type:"text"` + ProtectSameName bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"` + DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"` + DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. 
Unit: KB"` + Writable bool `json:"writable" type:"bool" default:"false"` } var config = driver.Config{ Name: "Alias", LocalSort: true, NoCache: true, - NoUpload: true, + NoUpload: false, DefaultRoot: "/", ProxyRangeOption: true, } diff --git a/drivers/alias/util.go b/drivers/alias/util.go index c0e9081b..ffb0b84f 100644 --- a/drivers/alias/util.go +++ b/drivers/alias/util.go @@ -3,12 +3,15 @@ package alias import ( "context" "fmt" + "net/url" stdpath "path" "strings" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" @@ -62,6 +65,7 @@ func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Ob Size: obj.GetSize(), Modified: obj.ModTime(), IsFolder: obj.IsDir(), + HashInfo: obj.GetHash(), }, nil } @@ -94,10 +98,15 @@ func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([ func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) (*model.Link, error) { reqPath := stdpath.Join(dst, sub) - storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{}) + // 参考 crypt 驱动 + storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath) if err != nil { return nil, err } + if _, ok := storage.(*Alias); !ok && !args.Redirect { + link, _, err := op.Link(ctx, storage, reqActualPath, args) + return link, err + } _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true}) if err != nil { return nil, err @@ -114,13 +123,13 @@ func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) } return link, nil } - link, _, err := fs.Link(ctx, reqPath, args) + link, _, err := op.Link(ctx, storage, reqActualPath, args) return link, err } -func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error) { +func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) (*string, error) { root, sub := d.getRootAndPath(obj.GetPath()) - if sub == "" { + if sub == "" && !isParent { return nil, errs.NotSupport } dsts, ok := d.pathMap[root] @@ -149,3 +158,68 @@ func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error) } return reqPath, nil } + +func (d *Alias) getArchiveMeta(ctx context.Context, dst, sub string, args model.ArchiveArgs) (model.ArchiveMeta, error) { + reqPath := stdpath.Join(dst, sub) + storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath) + if err != nil { + return nil, err + } + if _, ok := storage.(driver.ArchiveReader); ok { + return op.GetArchiveMeta(ctx, storage, reqActualPath, model.ArchiveMetaArgs{ + ArchiveArgs: args, + Refresh: true, + }) + } + return nil, errs.NotImplement +} + +func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) ([]model.Obj, error) { + reqPath := stdpath.Join(dst, sub) + storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath) + if err != nil { + return nil, err + } + if _, ok := storage.(driver.ArchiveReader); ok { + return op.ListArchive(ctx, storage, reqActualPath, model.ArchiveListArgs{ + ArchiveInnerArgs: args, + Refresh: true, + }) + } + return nil, errs.NotImplement +} + +func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) { + reqPath := stdpath.Join(dst, sub) + storage, reqActualPath, err := 
op.GetStorageAndActualPath(reqPath) + if err != nil { + return nil, err + } + if _, ok := storage.(driver.ArchiveReader); ok { + if _, ok := storage.(*Alias); !ok && !args.Redirect { + link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args) + return link, err + } + _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true}) + if err != nil { + return nil, err + } + if common.ShouldProxy(storage, stdpath.Base(sub)) { + link := &model.Link{ + URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s", + common.GetApiUrl(args.HttpReq), + utils.EncodePath(reqPath, true), + utils.EncodePath(args.InnerPath, true), + url.QueryEscape(args.Password), + sign.SignArchive(reqPath)), + } + if args.HttpReq != nil && d.ProxyRange { + link.RangeReadCloser = common.NoProxyRange + } + return link, nil + } + link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args) + return link, err + } + return nil, errs.NotImplement +} diff --git a/drivers/alist_v3/driver.go b/drivers/alist_v3/driver.go index d078c5fb..ac7e16a1 100644 --- a/drivers/alist_v3/driver.go +++ b/drivers/alist_v3/driver.go @@ -5,12 +5,14 @@ import ( "fmt" "io" "net/http" + "net/url" "path" "strings" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" @@ -34,7 +36,7 @@ func (d *AListV3) GetAddition() driver.Additional { func (d *AListV3) Init(ctx context.Context) error { d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/") var resp common.Resp[MeResp] - _, err := d.request("/me", http.MethodGet, func(req *resty.Request) { + _, _, err := d.request("/me", http.MethodGet, func(req *resty.Request) { req.SetResult(&resp) }) if err != nil { @@ -48,15 +50,15 @@ func (d *AListV3) Init(ctx context.Context) error { } } // re-get the user info - _, err = d.request("/me", http.MethodGet, func(req *resty.Request) { + _, _, err = d.request("/me", http.MethodGet, func(req *resty.Request) { req.SetResult(&resp) }) if err != nil { return err } if resp.Data.Role == model.GUEST { - url := d.Address + "/api/public/settings" - res, err := base.RestyClient.R().Get(url) + u := d.Address + "/api/public/settings" + res, err := base.RestyClient.R().Get(u) if err != nil { return err } @@ -74,7 +76,7 @@ func (d *AListV3) Drop(ctx context.Context) error { func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { var resp common.Resp[FsListResp] - _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) { req.SetResult(&resp).SetBody(ListReq{ PageReq: model.PageReq{ Page: 1, @@ -116,7 +118,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) userAgent = base.UserAgent } } - _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) { req.SetResult(&resp).SetBody(FsGetReq{ Path: file.GetPath(), Password: d.MetaPassword, @@ -131,7 +133,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) } func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { - _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) { + _, _, err := 
d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) { req.SetBody(MkdirOrLinkReq{ Path: path.Join(parentDir.GetPath(), dirName), }) @@ -140,7 +142,7 @@ func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri } func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error { - _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) { req.SetBody(MoveCopyReq{ SrcDir: path.Dir(srcObj.GetPath()), DstDir: dstDir.GetPath(), @@ -151,7 +153,7 @@ func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error { } func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) error { - _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) { req.SetBody(RenameReq{ Path: srcObj.GetPath(), Name: newName, @@ -161,7 +163,7 @@ func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) } func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { - _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) { req.SetBody(MoveCopyReq{ SrcDir: path.Dir(srcObj.GetPath()), DstDir: dstDir.GetPath(), @@ -172,7 +174,7 @@ func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { } func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error { - _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) { req.SetBody(RemoveReq{ Dir: path.Dir(obj.GetPath()), Names: []string{obj.GetName()}, @@ -181,16 +183,29 @@ func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error { return err } -func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", stream) +func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", reader) if err != nil { return err } req.Header.Set("Authorization", d.Token) - req.Header.Set("File-Path", path.Join(dstDir.GetPath(), stream.GetName())) + req.Header.Set("File-Path", path.Join(dstDir.GetPath(), s.GetName())) req.Header.Set("Password", d.MetaPassword) + if md5 := s.GetHash().GetHash(utils.MD5); len(md5) > 0 { + req.Header.Set("X-File-Md5", md5) + } + if sha1 := s.GetHash().GetHash(utils.SHA1); len(sha1) > 0 { + req.Header.Set("X-File-Sha1", sha1) + } + if sha256 := s.GetHash().GetHash(utils.SHA256); len(sha256) > 0 { + req.Header.Set("X-File-Sha256", sha256) + } - req.ContentLength = stream.GetSize() + req.ContentLength = s.GetSize() // client := base.NewHttpClient() // client.Timeout = time.Hour * 6 res, err := base.HttpClient.Do(req) @@ -219,6 +234,127 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt return nil } +func (d *AListV3) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + if !d.ForwardArchiveReq { + return nil, errs.NotImplement + } + var resp 
common.Resp[ArchiveMetaResp] + _, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) { + req.SetResult(&resp).SetBody(ArchiveMetaReq{ + ArchivePass: args.Password, + Password: d.MetaPassword, + Path: obj.GetPath(), + Refresh: false, + }) + }) + if code == 202 { + return nil, errs.WrongArchivePassword + } + if err != nil { + return nil, err + } + var tree []model.ObjTree + if resp.Data.Content != nil { + tree = make([]model.ObjTree, 0, len(resp.Data.Content)) + for _, content := range resp.Data.Content { + tree = append(tree, &content) + } + } + return &model.ArchiveMetaInfo{ + Comment: resp.Data.Comment, + Encrypted: resp.Data.Encrypted, + Tree: tree, + }, nil +} + +func (d *AListV3) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { + if !d.ForwardArchiveReq { + return nil, errs.NotImplement + } + var resp common.Resp[ArchiveListResp] + _, code, err := d.request("/fs/archive/list", http.MethodPost, func(req *resty.Request) { + req.SetResult(&resp).SetBody(ArchiveListReq{ + ArchiveMetaReq: ArchiveMetaReq{ + ArchivePass: args.Password, + Password: d.MetaPassword, + Path: obj.GetPath(), + Refresh: false, + }, + PageReq: model.PageReq{ + Page: 1, + PerPage: 0, + }, + InnerPath: args.InnerPath, + }) + }) + if code == 202 { + return nil, errs.WrongArchivePassword + } + if err != nil { + return nil, err + } + var files []model.Obj + for _, f := range resp.Data.Content { + file := model.ObjThumb{ + Object: model.Object{ + Name: f.Name, + Modified: f.Modified, + Ctime: f.Created, + Size: f.Size, + IsFolder: f.IsDir, + HashInfo: utils.FromString(f.HashInfo), + }, + Thumbnail: model.Thumbnail{Thumbnail: f.Thumb}, + } + files = append(files, &file) + } + return files, nil +} + +func (d *AListV3) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + if !d.ForwardArchiveReq { + return nil, errs.NotSupport + } + var resp common.Resp[ArchiveMetaResp] + _, _, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) { + req.SetResult(&resp).SetBody(ArchiveMetaReq{ + ArchivePass: args.Password, + Password: d.MetaPassword, + Path: obj.GetPath(), + Refresh: false, + }) + }) + if err != nil { + return nil, err + } + return &model.Link{ + URL: fmt.Sprintf("%s?inner=%s&pass=%s&sign=%s", + resp.Data.RawURL, + utils.EncodePath(args.InnerPath, true), + url.QueryEscape(args.Password), + resp.Data.Sign), + }, nil +} + +func (d *AListV3) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error { + if !d.ForwardArchiveReq { + return errs.NotImplement + } + dir, name := path.Split(srcObj.GetPath()) + _, _, err := d.request("/fs/archive/decompress", http.MethodPost, func(req *resty.Request) { + req.SetBody(DecompressReq{ + ArchivePass: args.Password, + CacheFull: args.CacheFull, + DstDir: dstDir.GetPath(), + InnerPath: args.InnerPath, + Name: []string{name}, + PutIntoNewDir: args.PutIntoNewDir, + SrcDir: dir, + }) + }) + return err +} + //func (d *AList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { // return nil, errs.NotSupport //} diff --git a/drivers/alist_v3/meta.go b/drivers/alist_v3/meta.go index cc5f2189..1e8b3c53 100644 --- a/drivers/alist_v3/meta.go +++ b/drivers/alist_v3/meta.go @@ -7,12 +7,13 @@ import ( type Addition struct { driver.RootPath - Address string `json:"url" required:"true"` - MetaPassword string `json:"meta_password"` - Username string `json:"username"` - Password string 
`json:"password"` - Token string `json:"token"` - PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"` + Address string `json:"url" required:"true"` + MetaPassword string `json:"meta_password"` + Username string `json:"username"` + Password string `json:"password"` + Token string `json:"token"` + PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"` + ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"` } var config = driver.Config{ diff --git a/drivers/alist_v3/types.go b/drivers/alist_v3/types.go index e517307f..1ae7926e 100644 --- a/drivers/alist_v3/types.go +++ b/drivers/alist_v3/types.go @@ -4,6 +4,7 @@ import ( "time" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" ) type ListReq struct { @@ -81,3 +82,89 @@ type MeResp struct { SsoId string `json:"sso_id"` Otp bool `json:"otp"` } + +type ArchiveMetaReq struct { + ArchivePass string `json:"archive_pass"` + Password string `json:"password"` + Path string `json:"path"` + Refresh bool `json:"refresh"` +} + +type TreeResp struct { + ObjResp + Children []TreeResp `json:"children"` + hashCache *utils.HashInfo +} + +func (t *TreeResp) GetSize() int64 { + return t.Size +} + +func (t *TreeResp) GetName() string { + return t.Name +} + +func (t *TreeResp) ModTime() time.Time { + return t.Modified +} + +func (t *TreeResp) CreateTime() time.Time { + return t.Created +} + +func (t *TreeResp) IsDir() bool { + return t.ObjResp.IsDir +} + +func (t *TreeResp) GetHash() utils.HashInfo { + return utils.FromString(t.HashInfo) +} + +func (t *TreeResp) GetID() string { + return "" +} + +func (t *TreeResp) GetPath() string { + return "" +} + +func (t *TreeResp) GetChildren() []model.ObjTree { + ret := make([]model.ObjTree, 0, len(t.Children)) + for _, child := range t.Children { + ret = append(ret, &child) + } + return ret +} + +func (t *TreeResp) Thumb() string { + return t.ObjResp.Thumb +} + +type ArchiveMetaResp struct { + Comment string `json:"comment"` + Encrypted bool `json:"encrypted"` + Content []TreeResp `json:"content"` + RawURL string `json:"raw_url"` + Sign string `json:"sign"` +} + +type ArchiveListReq struct { + model.PageReq + ArchiveMetaReq + InnerPath string `json:"inner_path"` +} + +type ArchiveListResp struct { + Content []ObjResp `json:"content"` + Total int64 `json:"total"` +} + +type DecompressReq struct { + ArchivePass string `json:"archive_pass"` + CacheFull bool `json:"cache_full"` + DstDir string `json:"dst_dir"` + InnerPath string `json:"inner_path"` + Name []string `json:"name"` + PutIntoNewDir bool `json:"put_into_new_dir"` + SrcDir string `json:"src_dir"` +} diff --git a/drivers/alist_v3/util.go b/drivers/alist_v3/util.go index 5ede285a..50c20250 100644 --- a/drivers/alist_v3/util.go +++ b/drivers/alist_v3/util.go @@ -17,7 +17,7 @@ func (d *AListV3) login() error { return nil } var resp common.Resp[LoginResp] - _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) { + _, _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) { req.SetResult(&resp).SetBody(base.Json{ "username": d.Username, "password": d.Password, @@ -31,7 +31,7 @@ func (d *AListV3) login() error { return nil } -func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) { +func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, int, error) { url := d.Address + "/api" + api req := base.RestyClient.R() req.SetHeader("Authorization", d.Token) @@ -40,22 
+40,26 @@ func (d *AListV3) request(api, method string, callback base.ReqCallback, retry .
 	}
 	res, err := req.Execute(method, url)
 	if err != nil {
-		return nil, err
+		code := 0
+		if res != nil {
+			code = res.StatusCode()
+		}
+		return nil, code, err
 	}
 	log.Debugf("[alist_v3] response body: %s", res.String())
 	if res.StatusCode() >= 400 {
-		return nil, fmt.Errorf("request failed, status: %s", res.Status())
+		return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status())
 	}
 	code := utils.Json.Get(res.Body(), "code").ToInt()
 	if code != 200 {
 		if (code == 401 || code == 403) && !utils.IsBool(retry...) {
 			err = d.login()
 			if err != nil {
-				return nil, err
+				return nil, code, err
 			}
 			return d.request(api, method, callback, true)
 		}
-		return nil, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
+		return nil, code, fmt.Errorf("request failed, code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
 	}
-	return res.Body(), nil
+	return res.Body(), 200, nil
 }
diff --git a/drivers/aliyundrive/driver.go b/drivers/aliyundrive/driver.go
index 2a977aa3..105e28b2 100644
--- a/drivers/aliyundrive/driver.go
+++ b/drivers/aliyundrive/driver.go
@@ -14,13 +14,12 @@ import (
 	"os"
 	"time"
 
-	"github.com/alist-org/alist/v3/internal/stream"
-
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/cron"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
@@ -194,7 +193,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 	}
 	if d.RapidUpload {
 		buf := bytes.NewBuffer(make([]byte, 0, 1024))
-		utils.CopyWithBufferN(buf, file, 1024)
+		_, err := utils.CopyWithBufferN(buf, file, 1024)
+		if err != nil {
+			return err
+		}
 		reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
 		if localFile != nil {
 			if _, err := localFile.Seek(0, io.SeekStart); err != nil {
@@ -286,6 +288,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 		file.Reader = localFile
 	}
 
+	rateLimited := driver.NewLimitedUploadStream(ctx, file)
 	for i, partInfo := range resp.PartInfoList {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -294,7 +297,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 		if d.InternalUpload {
 			url = partInfo.InternalUploadUrl
 		}
-		req, err := http.NewRequest("PUT", url, io.LimitReader(file, DEFAULT))
+		req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
 		if err != nil {
 			return err
 		}
@@ -303,7 +306,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 		if err != nil {
 			return err
 		}
-		res.Body.Close()
+		_ = res.Body.Close()
 		if count > 0 {
 			up(float64(i) * 100 / float64(count))
 		}
diff --git a/drivers/aliyundrive_open/driver.go b/drivers/aliyundrive_open/driver.go
index 4029ad57..394eadb1 100644
--- a/drivers/aliyundrive_open/driver.go
+++ b/drivers/aliyundrive_open/driver.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
+	"path/filepath"
 	"time"
 
 	"github.com/Xhofe/rateg"
@@ -14,17 +15,18 @@ import (
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
+	log "github.com/sirupsen/logrus"
 )
 
 type AliyundriveOpen struct {
 	model.Storage
 	Addition
 
-	base string
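+	// NOTE: the per-instance base URL was dropped in favor of the package-level
+	// API_URL constant (see meta.go), so all instances now share one endpoint.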
DriveId string limitList func(ctx context.Context, data base.Json) (*Files, error) limitLink func(ctx context.Context, file model.Obj) (*model.Link, error) + ref *AliyundriveOpen } func (d *AliyundriveOpen) Config() driver.Config { @@ -58,10 +60,32 @@ func (d *AliyundriveOpen) Init(ctx context.Context) error { return nil } +func (d *AliyundriveOpen) InitReference(storage driver.Driver) error { + refStorage, ok := storage.(*AliyundriveOpen) + if ok { + d.ref = refStorage + return nil + } + return errs.NotSupport +} + func (d *AliyundriveOpen) Drop(ctx context.Context) error { + d.ref = nil return nil } +// GetRoot implements the driver.GetRooter interface to properly set up the root object +func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) { + return &model.Object{ + ID: d.RootFolderID, + Path: "/", + Name: "root", + Size: 0, + Modified: d.Modified, + IsFolder: true, + }, nil +} + func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { if d.limitList == nil { return nil, fmt.Errorf("driver not init") @@ -70,9 +94,17 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li if err != nil { return nil, err } - return utils.SliceConvert(files, func(src File) (model.Obj, error) { - return fileToObj(src), nil + + objs, err := utils.SliceConvert(files, func(src File) (model.Obj, error) { + obj := fileToObj(src) + // Set the correct path for the object + if dir.GetPath() != "" { + obj.Path = filepath.Join(dir.GetPath(), obj.GetName()) + } + return obj, nil }) + + return objs, err } func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) { @@ -122,7 +154,16 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN if err != nil { return nil, err } - return fileToObj(newDir), nil + obj := fileToObj(newDir) + + // Set the correct Path for the returned directory object + if parentDir.GetPath() != "" { + obj.Path = filepath.Join(parentDir.GetPath(), dirName) + } else { + obj.Path = "/" + dirName + } + + return obj, nil } func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { @@ -132,20 +173,24 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m "drive_id": d.DriveId, "file_id": srcObj.GetID(), "to_parent_file_id": dstDir.GetID(), - "check_name_mode": "refuse", // optional:ignore,auto_rename,refuse + "check_name_mode": "ignore", // optional:ignore,auto_rename,refuse //"new_name": "newName", // The new name to use when a file of the same name exists }).SetResult(&resp) }) if err != nil { return nil, err } - if resp.Exist { - return nil, errors.New("existence of files with the same name") - } if srcObj, ok := srcObj.(*model.ObjThumb); ok { srcObj.ID = resp.FileID srcObj.Modified = time.Now() + srcObj.Path = filepath.Join(dstDir.GetPath(), srcObj.GetName()) + + // Check for duplicate files in the destination directory + if err := d.removeDuplicateFiles(ctx, dstDir.GetPath(), srcObj.GetName(), srcObj.GetID()); err != nil { + // Only log a warning instead of returning an error since the move operation has already completed successfully + log.Warnf("Failed to remove duplicate files after move: %v", err) + } return srcObj, nil } return nil, nil @@ -163,19 +208,47 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName if err != nil { return nil, err } - return fileToObj(newFile), nil + + // Check for duplicate files in the parent directory + 
parentPath := filepath.Dir(srcObj.GetPath()) + if err := d.removeDuplicateFiles(ctx, parentPath, newName, newFile.FileId); err != nil { + // Only log a warning instead of returning an error since the rename operation has already completed successfully + log.Warnf("Failed to remove duplicate files after rename: %v", err) + } + + obj := fileToObj(newFile) + + // Set the correct Path for the renamed object + if parentPath != "" && parentPath != "." { + obj.Path = filepath.Join(parentPath, newName) + } else { + obj.Path = "/" + newName + } + + return obj, nil } func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + var resp MoveOrCopyResp _, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "drive_id": d.DriveId, "file_id": srcObj.GetID(), "to_parent_file_id": dstDir.GetID(), - "auto_rename": true, - }) + "auto_rename": false, + }).SetResult(&resp) }) - return err + if err != nil { + return err + } + + // Check for duplicate files in the destination directory + if err := d.removeDuplicateFiles(ctx, dstDir.GetPath(), srcObj.GetName(), resp.FileID); err != nil { + // Only log a warning instead of returning an error since the copy operation has already completed successfully + log.Warnf("Failed to remove duplicate files after copy: %v", err) + } + + return nil } func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error { @@ -193,7 +266,18 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error { } func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { - return d.upload(ctx, dstDir, stream, up) + obj, err := d.upload(ctx, dstDir, stream, up) + + // Set the correct Path for the returned file object + if obj != nil && obj.GetPath() == "" { + if dstDir.GetPath() != "" { + if objWithPath, ok := obj.(model.SetPath); ok { + objWithPath.SetPath(filepath.Join(dstDir.GetPath(), obj.GetName())) + } + } + } + + return obj, err } func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { @@ -225,3 +309,4 @@ var _ driver.MkdirResult = (*AliyundriveOpen)(nil) var _ driver.MoveResult = (*AliyundriveOpen)(nil) var _ driver.RenameResult = (*AliyundriveOpen)(nil) var _ driver.PutResult = (*AliyundriveOpen)(nil) +var _ driver.GetRooter = (*AliyundriveOpen)(nil) diff --git a/drivers/aliyundrive_open/meta.go b/drivers/aliyundrive_open/meta.go index de9b45e0..03f97f8b 100644 --- a/drivers/aliyundrive_open/meta.go +++ b/drivers/aliyundrive_open/meta.go @@ -6,7 +6,7 @@ import ( ) type Addition struct { - DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"default"` + DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"resource"` driver.RootID RefreshToken string `json:"refresh_token" required:"true"` OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"` @@ -32,11 +32,10 @@ var config = driver.Config{ DefaultRoot: "root", NoOverwriteUpload: true, } +var API_URL = "https://openapi.alipan.com" func init() { op.RegisterDriver(func() driver.Driver { - return &AliyundriveOpen{ - base: "https://openapi.alipan.com", - } + return &AliyundriveOpen{} }) } diff --git a/drivers/aliyundrive_open/upload.go b/drivers/aliyundrive_open/upload.go index d152836c..4114c195 100644 --- a/drivers/aliyundrive_open/upload.go +++ b/drivers/aliyundrive_open/upload.go @@ -1,7 
+1,6 @@ package aliyundrive_open import ( - "bytes" "context" "encoding/base64" "fmt" @@ -15,6 +14,7 @@ import ( "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" + streamPkg "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" "github.com/avast/retry-go" @@ -77,7 +77,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict { return fmt.Errorf("upload status: %d", res.StatusCode) } @@ -126,21 +126,24 @@ func getProofRange(input string, size int64) (*ProofRange, error) { } func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) { - proofRange, err := getProofRange(d.AccessToken, stream.GetSize()) + proofRange, err := getProofRange(d.getAccessToken(), stream.GetSize()) if err != nil { return "", err } length := proofRange.End - proofRange.Start - buf := bytes.NewBuffer(make([]byte, 0, length)) reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length}) if err != nil { return "", err } - _, err = utils.CopyWithBufferN(buf, reader, length) + buf := make([]byte, length) + n, err := io.ReadFull(reader, buf) + if err == io.ErrUnexpectedEOF { + return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n) + } if err != nil { return "", err } - return base64.StdEncoding.EncodeToString(buf.Bytes()), nil + return base64.StdEncoding.EncodeToString(buf), nil } func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { @@ -183,25 +186,18 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m _, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) { req.SetBody(createData).SetResult(&createResp) }) - var tmpF model.File if err != nil { if e.Code != "PreHashMatched" || !rapidUpload { return nil, err } log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload") - hi := stream.GetHash() - hash := hi.GetHash(utils.SHA1) - if len(hash) <= 0 { - tmpF, err = stream.CacheFullInTempFile() + hash := stream.GetHash().GetHash(utils.SHA1) + if len(hash) != utils.SHA1.Width { + _, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1) if err != nil { return nil, err } - hash, err = utils.HashFile(utils.SHA1, tmpF) - if err != nil { - return nil, err - } - } delete(createData, "pre_hash") @@ -251,8 +247,9 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m rd = utils.NewMultiReadable(srd) } err = retry.Do(func() error { - rd.Reset() - return d.uploadPart(ctx, rd, createResp.PartInfoList[i]) + _ = rd.Reset() + rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd) + return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i]) }, retry.Attempts(3), retry.DelayType(retry.BackOffDelay), diff --git a/drivers/aliyundrive_open/util.go b/drivers/aliyundrive_open/util.go index 331e6400..c3cda10a 100644 --- a/drivers/aliyundrive_open/util.go +++ b/drivers/aliyundrive_open/util.go @@ -10,6 +10,7 @@ import ( "time" "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" 
"github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" @@ -19,7 +20,7 @@ import ( // do others that not defined in Driver interface func (d *AliyundriveOpen) _refreshToken() (string, string, error) { - url := d.base + "/oauth/access_token" + url := API_URL + "/oauth/access_token" if d.OauthTokenURL != "" && d.ClientID == "" { url = d.OauthTokenURL } @@ -74,6 +75,9 @@ func getSub(token string) (string, error) { } func (d *AliyundriveOpen) refreshToken() error { + if d.ref != nil { + return d.ref.refreshToken() + } refresh, access, err := d._refreshToken() for i := 0; i < 3; i++ { if err == nil { @@ -100,7 +104,7 @@ func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) { req := base.RestyClient.R() // TODO check whether access_token is expired - req.SetHeader("Authorization", "Bearer "+d.AccessToken) + req.SetHeader("Authorization", "Bearer "+d.getAccessToken()) if method == http.MethodPost { req.SetHeader("Content-Type", "application/json") } @@ -109,7 +113,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base } var e ErrResp req.SetError(&e) - res, err := req.Execute(method, d.base+uri) + res, err := req.Execute(method, API_URL+uri) if err != nil { if res != nil { log.Errorf("[aliyundrive_open] request error: %s", res.String()) @@ -118,7 +122,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base } isRetry := len(retry) > 0 && retry[0] if e.Code != "" { - if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.AccessToken == "") { + if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") { err = d.refreshToken() if err != nil { return nil, err, nil @@ -176,3 +180,43 @@ func getNowTime() (time.Time, string) { nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z") return nowTime, nowTimeStr } + +func (d *AliyundriveOpen) getAccessToken() string { + if d.ref != nil { + return d.ref.getAccessToken() + } + return d.AccessToken +} + +// Remove duplicate files with the same name in the given directory path, +// preserving the file with the given skipID if provided +func (d *AliyundriveOpen) removeDuplicateFiles(ctx context.Context, parentPath string, fileName string, skipID string) error { + // Handle empty path (root directory) case + if parentPath == "" { + parentPath = "/" + } + + // List all files in the parent directory + files, err := op.List(ctx, d, parentPath, model.ListArgs{}) + if err != nil { + return err + } + + // Find all files with the same name + var duplicates []model.Obj + for _, file := range files { + if file.GetName() == fileName && file.GetID() != skipID { + duplicates = append(duplicates, file) + } + } + + // Remove all duplicates files, except the file with the given ID + for _, file := range duplicates { + err := d.Remove(ctx, file) + if err != nil { + return err + } + } + + return nil +} diff --git a/drivers/all.go b/drivers/all.go index 40062a1a..224fb8dd 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -2,6 +2,7 @@ package drivers import ( _ "github.com/alist-org/alist/v3/drivers/115" + _ "github.com/alist-org/alist/v3/drivers/115_open" _ "github.com/alist-org/alist/v3/drivers/115_share" _ "github.com/alist-org/alist/v3/drivers/123" _ "github.com/alist-org/alist/v3/drivers/123_link" @@ 
-15,14 +16,21 @@ import ( _ "github.com/alist-org/alist/v3/drivers/aliyundrive" _ "github.com/alist-org/alist/v3/drivers/aliyundrive_open" _ "github.com/alist-org/alist/v3/drivers/aliyundrive_share" + _ "github.com/alist-org/alist/v3/drivers/azure_blob" _ "github.com/alist-org/alist/v3/drivers/baidu_netdisk" _ "github.com/alist-org/alist/v3/drivers/baidu_photo" _ "github.com/alist-org/alist/v3/drivers/baidu_share" _ "github.com/alist-org/alist/v3/drivers/chaoxing" _ "github.com/alist-org/alist/v3/drivers/cloudreve" + _ "github.com/alist-org/alist/v3/drivers/cloudreve_v4" _ "github.com/alist-org/alist/v3/drivers/crypt" + _ "github.com/alist-org/alist/v3/drivers/doubao" + _ "github.com/alist-org/alist/v3/drivers/doubao_share" _ "github.com/alist-org/alist/v3/drivers/dropbox" + _ "github.com/alist-org/alist/v3/drivers/febbox" _ "github.com/alist-org/alist/v3/drivers/ftp" + _ "github.com/alist-org/alist/v3/drivers/github" + _ "github.com/alist-org/alist/v3/drivers/github_releases" _ "github.com/alist-org/alist/v3/drivers/google_drive" _ "github.com/alist-org/alist/v3/drivers/google_photo" _ "github.com/alist-org/alist/v3/drivers/halalcloud" @@ -34,6 +42,7 @@ import ( _ "github.com/alist-org/alist/v3/drivers/local" _ "github.com/alist-org/alist/v3/drivers/mediatrack" _ "github.com/alist-org/alist/v3/drivers/mega" + _ "github.com/alist-org/alist/v3/drivers/misskey" _ "github.com/alist-org/alist/v3/drivers/mopan" _ "github.com/alist-org/alist/v3/drivers/netease_music" _ "github.com/alist-org/alist/v3/drivers/onedrive" diff --git a/drivers/azure_blob/driver.go b/drivers/azure_blob/driver.go new file mode 100644 index 00000000..6836533a --- /dev/null +++ b/drivers/azure_blob/driver.go @@ -0,0 +1,313 @@ +package azure_blob + +import ( + "context" + "fmt" + "io" + "path" + "regexp" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" +) +// Azure Blob Storage based on the blob APIs +// Link: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api +type AzureBlob struct { + model.Storage + Addition + client *azblob.Client + containerClient *container.Client + config driver.Config +} + +// Config returns the driver configuration. +func (d *AzureBlob) Config() driver.Config { + return d.config +} + +// GetAddition returns additional settings specific to Azure Blob Storage. +func (d *AzureBlob) GetAddition() driver.Additional { + return &d.Addition +} + +// Init initializes the Azure Blob Storage client using shared key authentication. 
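+// The endpoint may also be given as a bare account name; Init expands it to
+// the default https://<account>.blob.core.windows.net/ form before building
+// the client, so both full URLs and bare names are accepted.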
+func (d *AzureBlob) Init(ctx context.Context) error {
+	// Validate the storage account name extracted from the endpoint
+	accountName := extractAccountName(d.Addition.Endpoint)
+	if !regexp.MustCompile(`^[a-z0-9]+$`).MatchString(accountName) {
+		return fmt.Errorf("invalid storage account name: must contain only lowercase letters and numbers")
+	}
+
+	credential, err := azblob.NewSharedKeyCredential(accountName, d.Addition.AccessKey)
+	if err != nil {
+		return fmt.Errorf("failed to create credential: %w", err)
+	}
+
+	// Check if Endpoint is just account name
+	endpoint := d.Addition.Endpoint
+	if accountName == endpoint {
+		endpoint = fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
+	}
+	// Initialize Azure Blob client with retry policy
+	client, err := azblob.NewClientWithSharedKeyCredential(endpoint, credential,
+		&azblob.ClientOptions{ClientOptions: azcore.ClientOptions{
+			Retry: policy.RetryOptions{
+				MaxRetries: MaxRetries,
+				RetryDelay: RetryDelay,
+			},
+		}})
+	if err != nil {
+		return fmt.Errorf("failed to create client: %w", err)
+	}
+	d.client = client
+
+	// Ensure container exists or create it
+	containerName := strings.Trim(d.Addition.ContainerName, "/ \\")
+	if containerName == "" {
+		return fmt.Errorf("container name cannot be empty")
+	}
+	return d.createContainerIfNotExists(ctx, containerName)
+}
+
+// Drop releases resources associated with the Azure Blob client.
+func (d *AzureBlob) Drop(ctx context.Context) error {
+	d.client = nil
+	return nil
+}
+
+// List retrieves blobs and directories under the specified path.
+func (d *AzureBlob) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	prefix := ensureTrailingSlash(dir.GetPath())
+
+	pager := d.containerClient.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{
+		Prefix: &prefix,
+	})
+
+	var objs []model.Obj
+	for pager.More() {
+		page, err := pager.NextPage(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("failed to list blobs: %w", err)
+		}
+
+		// Process directories
+		for _, blobPrefix := range page.Segment.BlobPrefixes {
+			objs = append(objs, &model.Object{
+				Name:     path.Base(strings.TrimSuffix(*blobPrefix.Name, "/")),
+				Path:     *blobPrefix.Name,
+				Modified: *blobPrefix.Properties.LastModified,
+				Ctime:    *blobPrefix.Properties.CreationTime,
+				IsFolder: true,
+			})
+		}
+
+		// Process files
+		for _, blob := range page.Segment.BlobItems {
+			if strings.HasSuffix(*blob.Name, "/") {
+				continue
+			}
+			objs = append(objs, &model.Object{
+				Name:     path.Base(*blob.Name),
+				Path:     *blob.Name,
+				Size:     *blob.Properties.ContentLength,
+				Modified: *blob.Properties.LastModified,
+				Ctime:    *blob.Properties.CreationTime,
+				IsFolder: false,
+			})
+		}
+	}
+	return objs, nil
+}
+
+// Link generates a temporary SAS URL for accessing a blob.
+func (d *AzureBlob) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	blobClient := d.containerClient.NewBlobClient(file.GetPath())
+	expireDuration := time.Hour * time.Duration(d.SignURLExpire)
+
+	sasURL, err := blobClient.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate SAS URL: %w", err)
+	}
+	return &model.Link{URL: sasURL}, nil
+}
+
+// MakeDir creates a virtual directory by uploading an empty blob as a marker.
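+// Blob storage has no native folders: the marker is an empty blob whose
+// hdi_isfolder metadata is set to "true" (see mkDir in util.go), which is
+// what isDirectory later checks when classifying blobs.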
+func (d *AzureBlob) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + dirPath := path.Join(parentDir.GetPath(), dirName) + if err := d.mkDir(ctx, dirPath); err != nil { + return nil, fmt.Errorf("failed to create directory marker: %w", err) + } + + return &model.Object{ + Path: dirPath, + Name: dirName, + IsFolder: true, + }, nil +} + +// Move relocates an object (file or directory) to a new directory. +func (d *AzureBlob) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + srcPath := srcObj.GetPath() + dstPath := path.Join(dstDir.GetPath(), srcObj.GetName()) + + if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil { + return nil, fmt.Errorf("move operation failed: %w", err) + } + + return &model.Object{ + Path: dstPath, + Name: srcObj.GetName(), + Modified: time.Now(), + IsFolder: srcObj.IsDir(), + Size: srcObj.GetSize(), + }, nil +} + +// Rename changes the name of an existing object. +func (d *AzureBlob) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + srcPath := srcObj.GetPath() + dstPath := path.Join(path.Dir(srcPath), newName) + + if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil { + return nil, fmt.Errorf("rename operation failed: %w", err) + } + + return &model.Object{ + Path: dstPath, + Name: newName, + Modified: time.Now(), + IsFolder: srcObj.IsDir(), + Size: srcObj.GetSize(), + }, nil +} + +// Copy duplicates an object (file or directory) to a specified destination directory. +func (d *AzureBlob) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + dstPath := path.Join(dstDir.GetPath(), srcObj.GetName()) + + // Handle directory copying using flat listing + if srcObj.IsDir() { + srcPrefix := srcObj.GetPath() + srcPrefix = ensureTrailingSlash(srcPrefix) + + // Get all blobs under the source directory + blobs, err := d.flattenListBlobs(ctx, srcPrefix) + if err != nil { + return nil, fmt.Errorf("failed to list source directory contents: %w", err) + } + + // Process each blob - copy to destination + for _, blob := range blobs { + // Skip the directory marker itself + if *blob.Name == srcPrefix { + continue + } + + // Calculate relative path from source + relPath := strings.TrimPrefix(*blob.Name, srcPrefix) + itemDstPath := path.Join(dstPath, relPath) + + if strings.HasSuffix(itemDstPath, "/") || (blob.Metadata["hdi_isfolder"] != nil && *blob.Metadata["hdi_isfolder"] == "true") { + // Create directory marker at destination + err := d.mkDir(ctx, itemDstPath) + if err != nil { + return nil, fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err) + } + } else { + // Copy the blob + if err := d.copyFile(ctx, *blob.Name, itemDstPath); err != nil { + return nil, fmt.Errorf("failed to copy %s: %w", *blob.Name, err) + } + } + + } + + // Create directory marker at destination if needed + if len(blobs) == 0 { + err := d.mkDir(ctx, dstPath) + if err != nil { + return nil, fmt.Errorf("failed to create directory [%s]: %w", dstPath, err) + } + } + + return &model.Object{ + Path: dstPath, + Name: srcObj.GetName(), + Modified: time.Now(), + IsFolder: true, + }, nil + } + + // Copy a single file + if err := d.copyFile(ctx, srcObj.GetPath(), dstPath); err != nil { + return nil, fmt.Errorf("failed to copy blob: %w", err) + } + return &model.Object{ + Path: dstPath, + Name: srcObj.GetName(), + Size: srcObj.GetSize(), + Modified: time.Now(), + IsFolder: false, + }, nil +} + +// Remove 
deletes a specified blob or recursively deletes a directory and its contents. +func (d *AzureBlob) Remove(ctx context.Context, obj model.Obj) error { + path := obj.GetPath() + + // Handle recursive directory deletion + if obj.IsDir() { + return d.deleteFolder(ctx, path) + } + + // Delete single file + return d.deleteFile(ctx, path, false) +} + +// Put uploads a file stream to Azure Blob Storage with progress tracking. +func (d *AzureBlob) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + blobPath := path.Join(dstDir.GetPath(), stream.GetName()) + blobClient := d.containerClient.NewBlockBlobClient(blobPath) + + // Determine optimal upload options based on file size + options := optimizedUploadOptions(stream.GetSize()) + + // Track upload progress + progressTracker := &progressTracker{ + total: stream.GetSize(), + updateProgress: up, + } + + // Wrap stream to handle context cancellation and progress tracking + limitedStream := driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, progressTracker)) + + // Upload the stream to Azure Blob Storage + _, err := blobClient.UploadStream(ctx, limitedStream, options) + if err != nil { + return nil, fmt.Errorf("failed to upload file: %w", err) + } + + return &model.Object{ + Path: blobPath, + Name: stream.GetName(), + Size: stream.GetSize(), + Modified: time.Now(), + IsFolder: false, + }, nil +} + +// The following methods related to archive handling are not implemented yet. +// func (d *AzureBlob) GetArchiveMeta(...) {...} +// func (d *AzureBlob) ListArchive(...) {...} +// func (d *AzureBlob) Extract(...) {...} +// func (d *AzureBlob) ArchiveDecompress(...) {...} + +// Ensure AzureBlob implements the driver.Driver interface. +var _ driver.Driver = (*AzureBlob)(nil) diff --git a/drivers/azure_blob/meta.go b/drivers/azure_blob/meta.go new file mode 100644 index 00000000..b1e021b8 --- /dev/null +++ b/drivers/azure_blob/meta.go @@ -0,0 +1,32 @@ +package azure_blob + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + Endpoint string `json:"endpoint" required:"true" default:"https://.blob.core.windows.net/" help:"e.g. https://accountname.blob.core.windows.net/. The full endpoint URL for Azure Storage, including the unique storage account name (3 ~ 24 numbers and lowercase letters only)."` + AccessKey string `json:"access_key" required:"true" help:"The access key for Azure Storage, used for authentication. https://learn.microsoft.com/azure/storage/common/storage-account-keys-manage"` + ContainerName string `json:"container_name" required:"true" help:"The name of the container in Azure Storage (created in the Azure portal). 
https://learn.microsoft.com/azure/storage/blobs/blob-containers-portal"`
+	SignURLExpire int    `json:"sign_url_expire" type:"number" default:"4" help:"The expiration time for SAS URLs, in hours."`
+}
+
+// implement GetRootId interface
+func (r Addition) GetRootId() string {
+	return r.ContainerName
+}
+
+var config = driver.Config{
+	Name:        "Azure Blob Storage",
+	LocalSort:   true,
+	CheckStatus: true,
+}
+
+func init() {
+	op.RegisterDriver(func() driver.Driver {
+		return &AzureBlob{
+			config: config,
+		}
+	})
+}
diff --git a/drivers/azure_blob/types.go b/drivers/azure_blob/types.go
new file mode 100644
index 00000000..01323e51
--- /dev/null
+++ b/drivers/azure_blob/types.go
@@ -0,0 +1,20 @@
+package azure_blob
+
+import "github.com/alist-org/alist/v3/internal/driver"
+
+// progressTracker is used to track upload progress
+type progressTracker struct {
+	total          int64
+	current        int64
+	updateProgress driver.UpdateProgress
+}
+
+// Write implements io.Writer to track progress
+func (pt *progressTracker) Write(p []byte) (n int, err error) {
+	n = len(p)
+	pt.current += int64(n)
+	if pt.updateProgress != nil && pt.total > 0 {
+		pt.updateProgress(float64(pt.current) * 100 / float64(pt.total))
+	}
+	return n, nil
+}
diff --git a/drivers/azure_blob/util.go b/drivers/azure_blob/util.go
new file mode 100644
index 00000000..2adf3a0f
--- /dev/null
+++ b/drivers/azure_blob/util.go
@@ -0,0 +1,401 @@
+package azure_blob
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
+	log "github.com/sirupsen/logrus"
+)
+
+const (
+	// MaxRetries defines the maximum number of retry attempts for Azure operations
+	MaxRetries = 3
+	// RetryDelay defines the base delay between retries
+	RetryDelay = 3 * time.Second
+	// MaxBatchSize defines the maximum number of operations in a single batch request
+	MaxBatchSize = 128
+)
+
+// extractAccountName extracts the storage account name from an Azure Storage endpoint
+func extractAccountName(endpoint string) string {
+	// Strip the protocol prefix
+	endpoint = strings.TrimPrefix(endpoint, "https://")
+	endpoint = strings.TrimPrefix(endpoint, "http://")
+
+	// The part before the first dot is the account name
+	parts := strings.Split(endpoint, ".")
+	if len(parts) > 0 {
+		// to lower case
+		return strings.ToLower(parts[0])
+	}
+	return ""
+}
+
+// isNotFoundError checks if the error is a "not found" type error
+func isNotFoundError(err error) bool {
+	var storageErr *azcore.ResponseError
+	if errors.As(err, &storageErr) {
+		return storageErr.StatusCode == 404
+	}
+	// Fallback to string matching for backwards compatibility
+	return err != nil && strings.Contains(err.Error(), "BlobNotFound")
+}
+
+// flattenListBlobs lists every blob under the given prefix with a flat pager,
+// walking all result pages
+func (d *AzureBlob) flattenListBlobs(ctx context.Context, prefix string) ([]container.BlobItem, error) {
+	// Standardize prefix format
+	prefix = ensureTrailingSlash(prefix)
+
+	var blobItems []container.BlobItem
+	pager := d.containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
+		Prefix: &prefix,
+		Include: container.ListBlobsInclude{
+			Metadata: true,
+		},
+	})
+
+	for pager.More() {
+		page, err := pager.NextPage(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("failed to list blobs: %w", err)
+		}
+
+		for _, blob := range page.Segment.BlobItems {
+			blobItems = append(blobItems, *blob)
+		}
+	}
+
+	return blobItems, nil
+}
+
+// batchDeleteBlobs deletes the given blob paths in batches of MaxBatchSize
+func (d *AzureBlob) batchDeleteBlobs(ctx context.Context, blobPaths []string) error {
+	if len(blobPaths) == 0 {
+		return nil
+	}
+
+	// Process in batches of MaxBatchSize
+	for i := 0; i < len(blobPaths); i += MaxBatchSize {
+		end := min(i+MaxBatchSize, len(blobPaths))
+		currentBatch := blobPaths[i:end]
+
+		// Create batch builder
+		batchBuilder, err := d.containerClient.NewBatchBuilder()
+		if err != nil {
+			return fmt.Errorf("failed to create batch builder: %w", err)
+		}
+
+		// Add delete operations
+		for _, blobPath := range currentBatch {
+			if err := batchBuilder.Delete(blobPath, nil); err != nil {
+				return fmt.Errorf("failed to add delete operation for %s: %w", blobPath, err)
+			}
+		}
+
+		// Submit batch
+		responses, err := d.containerClient.SubmitBatch(ctx, batchBuilder, nil)
+		if err != nil {
+			return fmt.Errorf("batch delete request failed: %w", err)
+		}
+
+		// Check responses
+		for _, resp := range responses.Responses {
+			if resp.Error != nil && !isNotFoundError(resp.Error) {
+				// Resolve the blob name to give a more helpful error message
+				blobName := "unknown"
+				if resp.BlobName != nil {
+					blobName = *resp.BlobName
+				}
+				return fmt.Errorf("failed to delete blob %s: %v", blobName, resp.Error)
+			}
+		}
+	}
+
+	return nil
+}
+
+// deleteFolder recursively deletes a directory and all its contents
+func (d *AzureBlob) deleteFolder(ctx context.Context, prefix string) error {
+	// Ensure directory path ends with slash
+	prefix = ensureTrailingSlash(prefix)
+
+	// Get all blobs under the directory using flattenListBlobs
+	globs, err := d.flattenListBlobs(ctx, prefix)
+	if err != nil {
+		return fmt.Errorf("failed to list blobs for deletion: %w", err)
+	}
+
+	// If there are blobs in the directory, delete them
+	if len(globs) > 0 {
+		// Separate files from directory markers
+		var filePaths []string
+		var dirPaths []string
+
+		for _, blob := range globs {
+			blobName := *blob.Name
+			if isDirectory(blob) {
+				// remove trailing slash for directory names
+				dirPaths = append(dirPaths, strings.TrimSuffix(blobName, "/"))
+			} else {
+				filePaths = append(filePaths, blobName)
+			}
+		}
+
+		// Delete files first, then directories
+		if len(filePaths) > 0 {
+			if err := d.batchDeleteBlobs(ctx, filePaths); err != nil {
+				return err
+			}
+		}
+		if len(dirPaths) > 0 {
+			// Group directories by path depth
+			depthMap := make(map[int][]string)
+			for _, dir := range dirPaths {
+				depth := strings.Count(dir, "/") // compute the directory depth
+				depthMap[depth] = append(depthMap[depth], dir)
+			}
+
+			// Sort depths in descending order
+			var depths []int
+			for depth := range depthMap {
+				depths = append(depths, depth)
+			}
+			sort.Sort(sort.Reverse(sort.IntSlice(depths)))
+
+			// Batch-delete one depth level at a time, deepest first
+			for _, depth := range depths {
+				batch := depthMap[depth]
+				if err := d.batchDeleteBlobs(ctx, batch); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	// Finally, delete the directory marker itself
+	return d.deleteEmptyDirectory(ctx, prefix)
+}
+
+// deleteFile deletes a single blob; a missing directory marker is tolerated
+func (d *AzureBlob) deleteFile(ctx context.Context, path string, isDir bool) error {
+	blobClient := d.containerClient.NewBlobClient(path)
+	_, err := blobClient.Delete(ctx, nil)
+	if err != nil && !(isDir && isNotFoundError(err)) {
+		return err
+	}
+	return nil
+}
+
+// copyFile copies a single blob from source path to destination path
+func (d *AzureBlob) copyFile(ctx context.Context, srcPath, dstPath string) error {
+	srcBlob := d.containerClient.NewBlobClient(srcPath)
+	dstBlob := d.containerClient.NewBlobClient(dstPath)
+
+	// Use configured expiration time for SAS URL
+	expireDuration := time.Hour * time.Duration(d.SignURLExpire)
+	srcURL, err := srcBlob.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
+	if err != nil {
+		return fmt.Errorf("failed to generate source SAS URL: %w", err)
+	}
+
+	_, err = dstBlob.StartCopyFromURL(ctx, srcURL, nil)
+	return err
+}
+
+// createContainerIfNotExists creates the container if it does not already exist
+func (d *AzureBlob) createContainerIfNotExists(ctx context.Context, containerName string) error {
+	serviceClient := d.client.ServiceClient()
+	containerClient := serviceClient.NewContainerClient(containerName)
+
+	var options = service.CreateContainerOptions{}
+	_, err := containerClient.Create(ctx, &options)
+	if err != nil {
+		var responseErr *azcore.ResponseError
+		if errors.As(err, &responseErr) && responseErr.ErrorCode != "ContainerAlreadyExists" {
+			return fmt.Errorf("failed to create or access container [%s]: %w", containerName, err)
+		}
+	}
+
+	d.containerClient = containerClient
+	return nil
+}
+
+// mkDir creates a virtual directory marker by uploading an empty blob with metadata.
+func (d *AzureBlob) mkDir(ctx context.Context, fullDirName string) error {
+	dirPath := ensureTrailingSlash(fullDirName)
+	blobClient := d.containerClient.NewBlockBlobClient(dirPath)
+
+	// Upload an empty blob with metadata indicating it's a directory
+	_, err := blobClient.Upload(ctx, struct {
+		*bytes.Reader
+		io.Closer
+	}{
+		Reader: bytes.NewReader([]byte{}),
+		Closer: io.NopCloser(nil),
+	}, &blockblob.UploadOptions{
+		Metadata: map[string]*string{
+			"hdi_isfolder": to.Ptr("true"),
+		},
+	})
+	return err
+}
+
+// ensureTrailingSlash ensures the provided path ends with a trailing slash.
+func ensureTrailingSlash(path string) string {
+	if !strings.HasSuffix(path, "/") {
+		return path + "/"
+	}
+	return path
+}
+
+// moveOrRename moves or renames blobs or directories from source to destination.
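+// Blob storage has no server-side move or rename, so both are implemented as
+// copy-then-delete over every blob under the source path; srcSize is
+// currently unused.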
+func (d *AzureBlob) moveOrRename(ctx context.Context, srcPath, dstPath string, isDir bool, srcSize int64) error {
+	if isDir {
+		// Normalize paths for directory operations
+		srcPath = ensureTrailingSlash(srcPath)
+		dstPath = ensureTrailingSlash(dstPath)
+
+		// List all blobs under the source directory
+		blobs, err := d.flattenListBlobs(ctx, srcPath)
+		if err != nil {
+			return fmt.Errorf("failed to list blobs: %w", err)
+		}
+
+		// Iterate and copy each blob to the destination
+		for _, item := range blobs {
+			srcBlobName := *item.Name
+			relPath := strings.TrimPrefix(srcBlobName, srcPath)
+			itemDstPath := path.Join(dstPath, relPath)
+
+			if isDirectory(item) {
+				// Create directory marker at destination
+				if err := d.mkDir(ctx, itemDstPath); err != nil {
+					return fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
+				}
+			} else {
+				// Copy file blob to destination
+				if err := d.copyFile(ctx, srcBlobName, itemDstPath); err != nil {
+					return fmt.Errorf("failed to copy blob [%s]: %w", srcBlobName, err)
+				}
+			}
+		}
+
+		// Handle empty directories by creating a marker at destination
+		if len(blobs) == 0 {
+			if err := d.mkDir(ctx, dstPath); err != nil {
+				return fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
+			}
+		}
+
+		// Delete source directory and its contents
+		if err := d.deleteFolder(ctx, srcPath); err != nil {
+			log.Warnf("failed to delete source directory [%s]: %v, retrying once", srcPath, err)
+			// Retry deletion once more and ignore the result
+			if err := d.deleteFolder(ctx, srcPath); err != nil {
+				log.Errorf("Retry deletion of source directory [%s] failed: %v", srcPath, err)
+			}
+		}
+
+		return nil
+	}
+
+	// Single file move or rename operation
+	if err := d.copyFile(ctx, srcPath, dstPath); err != nil {
+		return fmt.Errorf("failed to copy file: %w", err)
+	}
+
+	// Delete source file after successful copy
+	if err := d.deleteFile(ctx, srcPath, false); err != nil {
+		log.Errorf("Error deleting source file [%s]: %v", srcPath, err)
+	}
+	return nil
+}
+
+// optimizedUploadOptions returns the optimal upload options based on file size
+func optimizedUploadOptions(fileSize int64) *azblob.UploadStreamOptions {
+	options := &azblob.UploadStreamOptions{
+		BlockSize:   4 * 1024 * 1024, // 4MB block size
+		Concurrency: 4,               // Default concurrency
+	}
+
+	// For large files, increase block size and concurrency
+	if fileSize > 256*1024*1024 { // For files larger than 256MB
+		options.BlockSize = 8 * 1024 * 1024 // 8MB blocks
+		options.Concurrency = 8             // More concurrent uploads
+	}
+
+	// For very large files (>1GB)
+	if fileSize > 1024*1024*1024 {
+		options.BlockSize = 16 * 1024 * 1024 // 16MB blocks
+		options.Concurrency = 16             // Higher concurrency
+	}
+
+	return options
+}
+
+// isDirectory determines if a blob represents a directory
+// Checks multiple indicators: path suffix, metadata, and content type
+func isDirectory(blob container.BlobItem) bool {
+	// Check path suffix
+	if strings.HasSuffix(*blob.Name, "/") {
+		return true
+	}
+
+	// Check metadata for directory marker
+	if blob.Metadata != nil {
+		if val, ok := blob.Metadata["hdi_isfolder"]; ok && val != nil && *val == "true" {
+			return true
+		}
+		// Azure Storage Explorer and other tools may use different metadata keys
+		if val, ok := blob.Metadata["is_directory"]; ok && val != nil && strings.ToLower(*val) == "true" {
+			return true
+		}
+	}
+
+	// Check content type (some tools mark directories with specific content types)
+	if blob.Properties != nil && blob.Properties.ContentType != nil {
+		contentType := strings.ToLower(*blob.Properties.ContentType)
+		if blob.Properties.ContentLength != nil && *blob.Properties.ContentLength == 0 && (contentType == "application/directory" || contentType == "directory") {
+			return true
+		}
+	}
+
+	return false
+}
+
+// deleteEmptyDirectory deletes a directory marker blob; callers are expected
+// to have removed the directory contents first
+func (d *AzureBlob) deleteEmptyDirectory(ctx context.Context, dirPath string) error {
+	// Delete the directory marker, trying the name without a trailing slash first
+	blobClient := d.containerClient.NewBlobClient(strings.TrimSuffix(dirPath, "/"))
+	_, err := blobClient.Delete(ctx, nil)
+
+	// Also try deleting with trailing slash (for different directory marker formats)
+	if err != nil && isNotFoundError(err) {
+		blobClient = d.containerClient.NewBlobClient(dirPath)
+		_, err = blobClient.Delete(ctx, nil)
+	}
+
+	// Ignore not found errors
+	if err != nil && isNotFoundError(err) {
+		log.Infof("Directory [%s] not found during deletion: %v", dirPath, err)
+		return nil
+	}
+
+	return err
+}
diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go
index ad52a4b5..c33e0b32 100644
--- a/drivers/baidu_netdisk/driver.go
+++ b/drivers/baidu_netdisk/driver.go
@@ -6,13 +6,16 @@ import (
 	"encoding/hex"
 	"errors"
 	"io"
-	"math"
 	"net/url"
+	"os"
 	stdpath "path"
 	"strconv"
 	"time"
 
+	"golang.org/x/sync/semaphore"
+
 	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
@@ -76,6 +79,8 @@ func (d *BaiduNetdisk) List(ctx context.Context, dir model.Obj, args model.ListA
 func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	if d.DownloadAPI == "crack" {
 		return d.linkCrack(file, args)
+	} else if d.DownloadAPI == "crack_video" {
+		return d.linkCrackVideo(file, args)
 	}
 	return d.linkOfficial(file, args)
 }
@@ -181,21 +186,35 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 		return newObj, nil
 	}
 
-	tempFile, err := stream.CacheFullInTempFile()
-	if err != nil {
-		return nil, err
+	var (
+		cache = stream.GetFile()
+		tmpF  *os.File
+		err   error
+	)
+	if _, ok := cache.(io.ReaderAt); !ok {
+		tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
+		if err != nil {
+			return nil, err
+		}
+		defer func() {
+			_ = tmpF.Close()
+			_ = os.Remove(tmpF.Name())
+		}()
+		cache = tmpF
 	}
 
 	streamSize := stream.GetSize()
-	sliceSize := d.getSliceSize()
-	count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
+	sliceSize := d.getSliceSize(streamSize)
+	count := int(streamSize / sliceSize)
 	lastBlockSize := streamSize % sliceSize
-	if streamSize > 0 && lastBlockSize == 0 {
+	if lastBlockSize > 0 {
+		count++
+	} else {
 		lastBlockSize = sliceSize
 	}
 
 	//cal md5 for first 256k data
-	const SliceSize int64 = 256 * 1024
+	const SliceSize int64 = 256 * utils.KB
 	// cal md5
 	blockList := make([]string, 0, count)
 	byteSize := sliceSize
@@ -203,6 +222,11 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 	sliceMd5H := md5.New()
 	sliceMd5H2 := md5.New()
 	slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
+	writers := []io.Writer{fileMd5H, sliceMd5H, slicemd5H2Write}
+	if tmpF != nil {
+		writers = append(writers, tmpF)
+	}
+	written := int64(0)
 
 	for i := 1; i <= count; i++ {
 		if utils.IsCanceled(ctx) {
@@ -211,13 +235,23 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 		if i == count {
 			byteSize = lastBlockSize
 		}
-		_, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
+		n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), stream, byteSize)
+		written += n
 		if err != nil && err != io.EOF {
 			return nil, err
 		}
 		blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
 		sliceMd5H.Reset()
 	}
+	if tmpF != nil {
+		if written != streamSize {
+			return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size = %d, expected = %d", written, streamSize)
+		}
+		_, err = tmpF.Seek(0, io.SeekStart)
+		if err != nil {
+			return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0")
+		}
+	}
 	contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
 	sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
 	blockListStr, _ := utils.Json.MarshalToString(blockList)
@@ -260,9 +294,10 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 	}
 	// step.2: upload the slices
 	threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
-		retry.Attempts(3),
+		retry.Attempts(1),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	sem := semaphore.NewWeighted(3)
 	for i, partseq := range precreateResp.BlockList {
 		if utils.IsCanceled(upCtx) {
 			break
@@ -273,6 +308,10 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 			byteSize = lastBlockSize
 		}
 		threadG.Go(func(ctx context.Context) error {
+			if err = sem.Acquire(ctx, 1); err != nil {
+				return err
+			}
+			defer sem.Release(1)
 			params := map[string]string{
 				"method":       "upload",
 				"access_token": d.AccessToken,
@@ -281,7 +320,8 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 				"uploadid":     precreateResp.Uploadid,
 				"partseq":      strconv.Itoa(partseq),
 			}
-			err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
+			err := d.uploadSlice(ctx, params, stream.GetName(),
+				driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
 			if err != nil {
 				return err
 			}
diff --git a/drivers/baidu_netdisk/meta.go b/drivers/baidu_netdisk/meta.go
index bf2aed5a..27571056 100644
--- a/drivers/baidu_netdisk/meta.go
+++ b/drivers/baidu_netdisk/meta.go
@@ -8,16 +8,18 @@ import (
 type Addition struct {
 	RefreshToken string `json:"refresh_token" required:"true"`
 	driver.RootPath
-	OrderBy              string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
-	OrderDirection       string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
-	DownloadAPI          string `json:"download_api" type:"select" options:"official,crack" default:"official"`
-	ClientID             string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
-	ClientSecret         string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
-	CustomCrackUA        string `json:"custom_crack_ua" required:"true" default:"netdisk"`
-	AccessToken          string
-	UploadThread         string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
-	UploadAPI            string `json:"upload_api" default:"https://d.pcs.baidu.com"`
-	CustomUploadPartSize int64  `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
+	OrderBy               string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
+	OrderDirection        string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
+	DownloadAPI           string `json:"download_api" type:"select" options:"official,crack,crack_video" default:"official"`
+	ClientID              string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
+	ClientSecret          string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
+	CustomCrackUA         string `json:"custom_crack_ua" required:"true" default:"netdisk"`
+	AccessToken           string
+	UploadThread          string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
+	UploadAPI             string `json:"upload_api" default:"https://d.pcs.baidu.com"`
+	CustomUploadPartSize  int64  `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
+	LowBandwithUploadMode bool   `json:"low_bandwith_upload_mode" default:"false"`
+	OnlyListVideoFile     bool   `json:"only_list_video_file" default:"false"`
 }
 
 var config = driver.Config{
diff --git a/drivers/baidu_netdisk/types.go b/drivers/baidu_netdisk/types.go
index cbec0bcf..ed9b09df 100644
--- a/drivers/baidu_netdisk/types.go
+++ b/drivers/baidu_netdisk/types.go
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
 )
 
 type TokenErrResp struct {
@@ -16,7 +17,7 @@ type TokenErrResp struct {
 type File struct {
 	//TkbindId int `json:"tkbind_id"`
 	//OwnerType int `json:"owner_type"`
-	//Category int `json:"category"`
+	Category int `json:"category"`
 	//RealCategory string `json:"real_category"`
 	FsId int64 `json:"fs_id"`
 	//OperId int `json:"oper_id"`
@@ -55,11 +56,11 @@ func fileToObj(f File) *model.ObjThumb {
 	if f.ServerFilename == "" {
 		f.ServerFilename = path.Base(f.Path)
 	}
-	if f.LocalCtime == 0 {
-		f.LocalCtime = f.Ctime
+	if f.ServerCtime == 0 {
+		f.ServerCtime = f.Ctime
 	}
-	if f.LocalMtime == 0 {
-		f.LocalMtime = f.Mtime
+	if f.ServerMtime == 0 {
+		f.ServerMtime = f.Mtime
 	}
 	return &model.ObjThumb{
 		Object: model.Object{
@@ -67,12 +68,12 @@ func fileToObj(f File) *model.ObjThumb {
 			Path: f.Path,
 			Name: f.ServerFilename,
 			Size: f.Size,
-			Modified: time.Unix(f.LocalMtime, 0),
-			Ctime: time.Unix(f.LocalCtime, 0),
+			Modified: time.Unix(f.ServerMtime, 0),
+			Ctime: time.Unix(f.ServerCtime, 0),
 			IsFolder: f.Isdir == 1,
 			// The MD5 obtained directly from the API is wrong (obfuscated)
-			// HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
+			HashInfo: utils.NewHashInfo(utils.MD5, DecryptMd5(f.Md5)),
 		},
 		Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
 	}
diff --git a/drivers/baidu_netdisk/util.go b/drivers/baidu_netdisk/util.go
index ac1f06e8..1249b3f4 100644
--- a/drivers/baidu_netdisk/util.go
+++ b/drivers/baidu_netdisk/util.go
@@ -1,11 +1,14 @@ package baidu_netdisk
 
 import (
+	"encoding/hex"
 	"errors"
 	"fmt"
 	"net/http"
 	"strconv"
+	"strings"
 	"time"
+	"unicode"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -76,6 +79,12 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
 				return retry.Unrecoverable(err2)
 			}
 		}
+
+		if 31023 == errno && d.DownloadAPI == "crack_video" {
+			result = res.Body()
+			return nil
+		}
+
 		return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
 	}
 	result = res.Body()
@@ -128,12 +137,21 @@ func (d *BaiduNetdisk) getFiles(dir string) ([]File, error) {
 		if len(resp.List) == 0 {
 			break
 		}
-		res = append(res, resp.List...)
+
+		if d.OnlyListVideoFile {
+			for _, file := range resp.List {
+				if file.Isdir == 1 || file.Category == 1 {
+					res = append(res, file)
+				}
+			}
+		} else {
+			res = append(res, resp.List...)
+		}
 	}
 	return res, nil
 }
 
-func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model.Link, error) {
+func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
 	var resp DownloadResp
 	params := map[string]string{
 		"method": "filemetas",
@@ -153,8 +171,6 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model
 		u = res.Header().Get("location")
 	//}
 
-	updateObjMd5(file, "pan.baidu.com", u)
-
 	return &model.Link{
 		URL: u,
 		Header: http.Header{
@@ -163,7 +179,7 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model
 	}, nil
 }
 
-func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Link, error) {
+func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
 	var resp DownloadResp2
 	param := map[string]string{
 		"target": fmt.Sprintf("[\"%s\"]", file.GetPath()),
@@ -178,8 +194,6 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
 		return nil, err
 	}
 
-	updateObjMd5(file, d.CustomCrackUA, resp.Info[0].Dlink)
-
 	return &model.Link{
 		URL: resp.Info[0].Dlink,
 		Header: http.Header{
@@ -188,6 +202,34 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
 	}, nil
 }
 
+func (d *BaiduNetdisk) linkCrackVideo(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
+	param := map[string]string{
+		"type": "VideoURL",
+		"path": fmt.Sprintf("%s", file.GetPath()),
+		"fs_id": file.GetID(),
+		"devuid": "0%1",
+		"clienttype": "1",
+		"channel": "android_15_25010PN30C_bd-netdisk_1523a",
+		"nom3u8": "1",
+		"dlink": "1",
+		"media": "1",
+		"origin": "dlna",
+	}
+	resp, err := d.request("https://pan.baidu.com/api/mediainfo", http.MethodGet, func(req *resty.Request) {
+		req.SetQueryParams(param)
+	}, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return &model.Link{
+		URL: utils.Json.Get(resp, "info", "dlink").ToString(),
+		Header: http.Header{
+			"User-Agent": []string{d.CustomCrackUA},
+		},
+	}, nil
+}
+
 func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
 	params := map[string]string{
 		"method": "filemanager",
@@ -229,37 +271,74 @@ func joinTime(form map[string]string, ctime, mtime int64) {
 	form["local_ctime"] = strconv.FormatInt(ctime, 10)
 }
 
-func updateObjMd5(obj model.Obj, userAgent, u string) {
-	object := model.GetRawObject(obj)
-	if object != nil {
-		req, _ := http.NewRequest(http.MethodHead, u, nil)
-		req.Header.Add("User-Agent", userAgent)
-		resp, _ := base.HttpClient.Do(req)
-		if resp != nil {
-			contentMd5 := resp.Header.Get("Content-Md5")
-			object.HashInfo = utils.NewHashInfo(utils.MD5, contentMd5)
-		}
-	}
-}
-
 const (
 	DefaultSliceSize int64 = 4 * utils.MB
-	VipSliceSize           = 16 * utils.MB
-	SVipSliceSize          = 32 * utils.MB
+	VipSliceSize     int64 = 16 * utils.MB
+	SVipSliceSize    int64 = 32 * utils.MB
+
+	MaxSliceNum = 2048 // The docs say 1024 (or say nothing), but testing shows 2048 works
+	SliceStep   int64 = 1 * utils.MB
 )
 
-func (d *BaiduNetdisk) getSliceSize() int64 {
-	if d.CustomUploadPartSize != 0 {
-		return d.CustomUploadPartSize
-	}
-	switch d.vipType {
-	case 1:
-		return VipSliceSize
-	case 2:
-		return SVipSliceSize
-	default:
+func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
+	// Non-VIP accounts are fixed at 4MB slices
+	if d.vipType == 0 {
+		if d.CustomUploadPartSize != 0 {
+			log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
+		}
+		if filesize > MaxSliceNum*DefaultSliceSize {
+			log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
+		}
+		return DefaultSliceSize
 	}
+
+	if
d.CustomUploadPartSize != 0 { + if d.CustomUploadPartSize < DefaultSliceSize { + log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize) + return DefaultSliceSize + } + + if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize { + log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize) + return VipSliceSize + } + + if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize { + log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize) + return SVipSliceSize + } + + return d.CustomUploadPartSize + } + + maxSliceSize := DefaultSliceSize + + switch d.vipType { + case 1: + maxSliceSize = VipSliceSize + case 2: + maxSliceSize = SVipSliceSize + } + + // upload on low bandwidth + if d.LowBandwithUploadMode { + size := DefaultSliceSize + + for size <= maxSliceSize { + if filesize <= MaxSliceNum*size { + return size + } + + size += SliceStep + } + } + + if filesize > MaxSliceNum*maxSliceSize { + log.Warnf("File size(%d) is too large, may cause upload failure", filesize) + } + + return maxSliceSize } // func encodeURIComponent(str string) string { @@ -267,3 +346,40 @@ func (d *BaiduNetdisk) getSliceSize() int64 { // r = strings.ReplaceAll(r, "+", "%20") // return r // } + +func DecryptMd5(encryptMd5 string) string { + if _, err := hex.DecodeString(encryptMd5); err == nil { + return encryptMd5 + } + + var out strings.Builder + out.Grow(len(encryptMd5)) + for i, n := 0, int64(0); i < len(encryptMd5); i++ { + if i == 9 { + n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g') + } else { + n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64) + } + out.WriteString(strconv.FormatInt(n^int64(15&i), 16)) + } + + encryptMd5 = out.String() + return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24] +} + +func EncryptMd5(originalMd5 string) string { + reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24] + + var out strings.Builder + out.Grow(len(reversed)) + for i, n := 0, int64(0); i < len(reversed); i++ { + n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64) + n ^= int64(15 & i) + if i == 9 { + out.WriteRune(rune(n) + 'g') + } else { + out.WriteString(strconv.FormatInt(n, 16)) + } + } + return out.String() +} diff --git a/drivers/baidu_photo/driver.go b/drivers/baidu_photo/driver.go index 94716983..5a34fcb4 100644 --- a/drivers/baidu_photo/driver.go +++ b/drivers/baidu_photo/driver.go @@ -7,13 +7,16 @@ import ( "errors" "fmt" "io" - "math" + "os" "regexp" "strconv" "strings" "time" + "golang.org/x/sync/semaphore" + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" @@ -27,9 +30,10 @@ type BaiduPhoto struct { model.Storage Addition - AccessToken string - Uk int64 - root model.Obj + // AccessToken string + Uk int64 + bdstoken string + root model.Obj uploadThread int } @@ -48,9 +52,9 @@ func (d *BaiduPhoto) Init(ctx context.Context) error { d.uploadThread, d.UploadThread = 3, "3" } - if err := d.refreshToken(); err != nil { - return err - } + // if err := d.refreshToken(); err != nil { + // return err + // } // root if d.AlbumID != "" { @@ -73,6 +77,10 @@ func (d *BaiduPhoto) Init(ctx context.Context) error { if err != nil { return err } + d.bdstoken, err 
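
DecryptMd5 and EncryptMd5 above are exact inverses: the half-swap reordering is an involution, the per-index XOR with 15&i undoes itself, and the deliberately non-hex byte at index 9 (always in 'g'..'v') is what lets DecryptMd5 tell an obfuscated digest from a plain one. A round-trip property check, assuming both functions are in scope:

// md5RoundTrip verifies DecryptMd5(EncryptMd5(h)) == h for a lowercase
// 32-character hex digest h; EncryptMd5's output is never valid hex, so
// DecryptMd5 takes the decode path rather than the early pass-through.
func md5RoundTrip(h string) bool {
	return DecryptMd5(EncryptMd5(h)) == h
}

Plain hex digests, by contrast, pass through DecryptMd5 untouched, which is why fileToObj can feed it every Md5 field unconditionally.
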
= d.getBDStoken() + if err != nil { + return err + } d.Uk, err = strconv.ParseInt(info.YouaID, 10, 64) return err } @@ -82,7 +90,7 @@ func (d *BaiduPhoto) GetRoot(ctx context.Context) (model.Obj, error) { } func (d *BaiduPhoto) Drop(ctx context.Context) error { - d.AccessToken = "" + // d.AccessToken = "" d.Uk = 0 d.root = nil return nil @@ -140,14 +148,13 @@ func (d *BaiduPhoto) Link(ctx context.Context, file model.Obj, args model.LinkAr // 处理共享相册 if d.Uk != file.Uk { // 有概率无法获取到链接 - return d.linkAlbum(ctx, file, args) + // return d.linkAlbum(ctx, file, args) - // 接口被限制,只能使用cookie - // f, err := d.CopyAlbumFile(ctx, file) - // if err != nil { - // return nil, err - // } - // return d.linkFile(ctx, f, args) + f, err := d.CopyAlbumFile(ctx, file) + if err != nil { + return nil, err + } + return d.linkFile(ctx, f, args) } return d.linkFile(ctx, &file.File, args) } @@ -235,11 +242,21 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil // TODO: // 暂时没有找到妙传方式 - - // 需要获取完整文件md5,必须支持 io.Seek - tempFile, err := stream.CacheFullInTempFile() - if err != nil { - return nil, err + var ( + cache = stream.GetFile() + tmpF *os.File + err error + ) + if _, ok := cache.(io.ReaderAt); !ok { + tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*") + if err != nil { + return nil, err + } + defer func() { + _ = tmpF.Close() + _ = os.Remove(tmpF.Name()) + }() + cache = tmpF } const DEFAULT int64 = 1 << 22 @@ -247,9 +264,11 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil // 计算需要的数据 streamSize := stream.GetSize() - count := int(math.Ceil(float64(streamSize) / float64(DEFAULT))) + count := int(streamSize / DEFAULT) lastBlockSize := streamSize % DEFAULT - if lastBlockSize == 0 { + if lastBlockSize > 0 { + count++ + } else { lastBlockSize = DEFAULT } @@ -260,6 +279,11 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil sliceMd5H := md5.New() sliceMd5H2 := md5.New() slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize) + writers := []io.Writer{fileMd5H, sliceMd5H, slicemd5H2Write} + if tmpF != nil { + writers = append(writers, tmpF) + } + written := int64(0) for i := 1; i <= count; i++ { if utils.IsCanceled(ctx) { return nil, ctx.Err() @@ -267,13 +291,23 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil if i == count { byteSize = lastBlockSize } - _, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize) + n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), stream, byteSize) + written += n if err != nil && err != io.EOF { return nil, err } sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil))) sliceMd5H.Reset() } + if tmpF != nil { + if written != streamSize { + return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize) + } + _, err = tmpF.Seek(0, io.SeekStart) + if err != nil { + return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ") + } + } contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil)) sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil)) blockListStr, _ := utils.Json.MarshalToString(sliceMD5List) @@ -285,18 +319,19 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil "rtype": "1", "ctype": "11", "path": fmt.Sprintf("/%s", stream.GetName()), - "size": fmt.Sprint(stream.GetSize()), + "size": fmt.Sprint(streamSize), "slice-md5": sliceMd5, "content-md5": contentMd5, "block_list": 
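
The rewritten Put derives the block count with integer arithmetic instead of the old math.Ceil float round-trip; when the size divides evenly, the final block is a full DEFAULT bytes. The bookkeeping in isolation:

// chunkPlan restates the count/lastBlockSize computation above.
func chunkPlan(streamSize, blockSize int64) (count int, last int64) {
	count = int(streamSize / blockSize)
	last = streamSize % blockSize
	if last > 0 {
		count++ // partial tail block
	} else {
		last = blockSize // even split: the tail is full-sized
	}
	return count, last
}

For example, chunkPlan(10<<20, 4<<20) returns (3, 2<<20): two full 4 MiB blocks plus a 2 MiB tail.
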
blockListStr, } // 尝试获取之前的进度 - precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5) + precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, strconv.FormatInt(d.Uk, 10), contentMd5) if !ok { _, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) { r.SetContext(ctx) r.SetFormData(params) + r.SetQueryParam("bdstoken", d.bdstoken) }, &precreateResp) if err != nil { return nil, err @@ -309,6 +344,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil retry.Attempts(3), retry.Delay(time.Second), retry.DelayType(retry.BackOffDelay)) + sem := semaphore.NewWeighted(3) for i, partseq := range precreateResp.BlockList { if utils.IsCanceled(upCtx) { break @@ -320,17 +356,22 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil } threadG.Go(func(ctx context.Context) error { + if err = sem.Acquire(ctx, 1); err != nil { + return err + } + defer sem.Release(1) uploadParams := map[string]string{ "method": "upload", "path": params["path"], "partseq": fmt.Sprint(partseq), "uploadid": precreateResp.UploadID, + "app_id": "16051585", } - _, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) { r.SetContext(ctx) r.SetQueryParams(uploadParams) - r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize)) + r.SetFileReader("file", stream.GetName(), + driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize))) }, nil) if err != nil { return err @@ -343,7 +384,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil if err = threadG.Wait(); err != nil { if errors.Is(err, context.Canceled) { precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 }) - base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5) + base.SaveUploadProgress(d, strconv.FormatInt(d.Uk, 10), contentMd5) } return nil, err } @@ -353,6 +394,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil _, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) { r.SetContext(ctx) r.SetFormData(params) + r.SetQueryParam("bdstoken", d.bdstoken) }, &precreateResp) if err != nil { return nil, err diff --git a/drivers/baidu_photo/meta.go b/drivers/baidu_photo/meta.go index da2229f5..3bc2f622 100644 --- a/drivers/baidu_photo/meta.go +++ b/drivers/baidu_photo/meta.go @@ -6,13 +6,14 @@ import ( ) type Addition struct { - RefreshToken string `json:"refresh_token" required:"true"` - ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"` - AlbumID string `json:"album_id"` + // RefreshToken string `json:"refresh_token" required:"true"` + Cookie string `json:"cookie" required:"true"` + ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"` + AlbumID string `json:"album_id"` //AlbumPassword string `json:"album_password"` - DeleteOrigin bool `json:"delete_origin"` - ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"` - ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"` + DeleteOrigin bool `json:"delete_origin"` + // ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"` + // ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"` UploadThread string 
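
Part uploads now pass through a weighted semaphore in addition to the retry group, hard-capping in-flight superfile2 requests at three. The concurrency pattern on its own, with errgroup standing in for the project's retry group:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(3) // at most 3 parts in flight
	g, ctx := errgroup.WithContext(ctx)
	for part := 0; part < 8; part++ {
		part := part
		g.Go(func() error {
			if err := sem.Acquire(ctx, 1); err != nil {
				return err // context cancelled while waiting
			}
			defer sem.Release(1)
			fmt.Println("uploading part", part)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}
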
`json:"upload_thread" default:"3" help:"1<=thread<=32"` } diff --git a/drivers/baidu_photo/types.go b/drivers/baidu_photo/types.go index 2bbacd30..0e5cbb2c 100644 --- a/drivers/baidu_photo/types.go +++ b/drivers/baidu_photo/types.go @@ -72,7 +72,7 @@ func (c *File) Thumb() string { } func (c *File) GetHash() utils.HashInfo { - return utils.NewHashInfo(utils.MD5, c.Md5) + return utils.NewHashInfo(utils.MD5, DecryptMd5(c.Md5)) } /*相册部分*/ diff --git a/drivers/baidu_photo/utils.go b/drivers/baidu_photo/utils.go index be0ed133..6061600e 100644 --- a/drivers/baidu_photo/utils.go +++ b/drivers/baidu_photo/utils.go @@ -2,13 +2,15 @@ package baiduphoto import ( "context" + "encoding/hex" "fmt" "net/http" + "strconv" + "strings" + "unicode" "github.com/alist-org/alist/v3/drivers/base" - "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" - "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" ) @@ -23,7 +25,8 @@ const ( func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) { req := client.R(). - SetQueryParam("access_token", d.AccessToken) + // SetQueryParam("access_token", d.AccessToken) + SetHeader("Cookie", d.Cookie) if callback != nil { callback(req) } @@ -45,10 +48,10 @@ func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, c return nil, fmt.Errorf("no shared albums found") case 50100: return nil, fmt.Errorf("illegal title, only supports 50 characters") - case -6: - if err = d.refreshToken(); err != nil { - return nil, err - } + // case -6: + // if err = d.refreshToken(); err != nil { + // return nil, err + // } default: return nil, fmt.Errorf("errno: %d, refer to https://photo.baidu.com/union/doc", erron) } @@ -63,29 +66,29 @@ func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, c // return res.Body(), nil //} -func (d *BaiduPhoto) refreshToken() error { - u := "https://openapi.baidu.com/oauth/2.0/token" - var resp base.TokenResp - var e TokenErrResp - _, err := base.RestyClient.R().SetResult(&resp).SetError(&e).SetQueryParams(map[string]string{ - "grant_type": "refresh_token", - "refresh_token": d.RefreshToken, - "client_id": d.ClientID, - "client_secret": d.ClientSecret, - }).Get(u) - if err != nil { - return err - } - if e.ErrorMsg != "" { - return &e - } - if resp.RefreshToken == "" { - return errs.EmptyToken - } - d.AccessToken, d.RefreshToken = resp.AccessToken, resp.RefreshToken - op.MustSaveDriverStorage(d) - return nil -} +// func (d *BaiduPhoto) refreshToken() error { +// u := "https://openapi.baidu.com/oauth/2.0/token" +// var resp base.TokenResp +// var e TokenErrResp +// _, err := base.RestyClient.R().SetResult(&resp).SetError(&e).SetQueryParams(map[string]string{ +// "grant_type": "refresh_token", +// "refresh_token": d.RefreshToken, +// "client_id": d.ClientID, +// "client_secret": d.ClientSecret, +// }).Get(u) +// if err != nil { +// return err +// } +// if e.ErrorMsg != "" { +// return &e +// } +// if resp.RefreshToken == "" { +// return errs.EmptyToken +// } +// d.AccessToken, d.RefreshToken = resp.AccessToken, resp.RefreshToken +// op.MustSaveDriverStorage(d) +// return nil +// } func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) { return d.Request(base.RestyClient, furl, http.MethodGet, callback, resp) @@ -359,10 +362,6 @@ func (d *BaiduPhoto) linkAlbum(ctx 
context.Context, file *AlbumFile, args model. location := resp.Header().Get("Location") - if err != nil { - return nil, err - } - link := &model.Link{ URL: location, Header: http.Header{ @@ -384,36 +383,36 @@ func (d *BaiduPhoto) linkFile(ctx context.Context, file *File, args model.LinkAr headers["X-Forwarded-For"] = args.IP } - // var downloadUrl struct { - // Dlink string `json:"dlink"` - // } - // _, err := d.Get(FILE_API_URL_V1+"/download", func(r *resty.Request) { - // r.SetContext(ctx) - // r.SetHeaders(headers) - // r.SetQueryParams(map[string]string{ - // "fsid": fmt.Sprint(file.Fsid), - // }) - // }, &downloadUrl) - - resp, err := d.Request(base.NoRedirectClient, FILE_API_URL_V1+"/download", http.MethodHead, func(r *resty.Request) { + var downloadUrl struct { + Dlink string `json:"dlink"` + } + _, err := d.Get(FILE_API_URL_V2+"/download", func(r *resty.Request) { r.SetContext(ctx) r.SetHeaders(headers) r.SetQueryParams(map[string]string{ "fsid": fmt.Sprint(file.Fsid), }) - }, nil) + }, &downloadUrl) + + // resp, err := d.Request(base.NoRedirectClient, FILE_API_URL_V1+"/download", http.MethodHead, func(r *resty.Request) { + // r.SetContext(ctx) + // r.SetHeaders(headers) + // r.SetQueryParams(map[string]string{ + // "fsid": fmt.Sprint(file.Fsid), + // }) + // }, nil) if err != nil { return nil, err } - if resp.StatusCode() != 302 { - return nil, fmt.Errorf("not found 302 redirect") - } + // if resp.StatusCode() != 302 { + // return nil, fmt.Errorf("not found 302 redirect") + // } - location := resp.Header().Get("Location") + // location := resp.Header().Get("Location") link := &model.Link{ - URL: location, + URL: downloadUrl.Dlink, Header: http.Header{ "User-Agent": []string{headers["User-Agent"]}, "Referer": []string{"https://photo.baidu.com/"}, @@ -476,3 +475,55 @@ func (d *BaiduPhoto) uInfo() (*UInfo, error) { } return &info, nil } + +func (d *BaiduPhoto) getBDStoken() (string, error) { + var info struct { + Result struct { + Bdstoken string `json:"bdstoken"` + Token string `json:"token"` + Uk int64 `json:"uk"` + } `json:"result"` + } + _, err := d.Get("https://pan.baidu.com/api/gettemplatevariable?fields=[%22bdstoken%22,%22token%22,%22uk%22]", nil, &info) + if err != nil { + return "", err + } + return info.Result.Bdstoken, nil +} + +func DecryptMd5(encryptMd5 string) string { + if _, err := hex.DecodeString(encryptMd5); err == nil { + return encryptMd5 + } + + var out strings.Builder + out.Grow(len(encryptMd5)) + for i, n := 0, int64(0); i < len(encryptMd5); i++ { + if i == 9 { + n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g') + } else { + n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64) + } + out.WriteString(strconv.FormatInt(n^int64(15&i), 16)) + } + + encryptMd5 = out.String() + return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24] +} + +func EncryptMd5(originalMd5 string) string { + reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24] + + var out strings.Builder + out.Grow(len(reversed)) + for i, n := 0, int64(0); i < len(reversed); i++ { + n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64) + n ^= int64(15 & i) + if i == 9 { + out.WriteRune(rune(n) + 'g') + } else { + out.WriteString(strconv.FormatInt(n, 16)) + } + } + return out.String() +} diff --git a/drivers/base/client.go b/drivers/base/client.go index 8bf8f421..538c43a6 100644 --- a/drivers/base/client.go +++ b/drivers/base/client.go @@ -6,6 +6,7 @@ import ( "time" "github.com/alist-org/alist/v3/internal/conf" + 
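
getBDStoken builds its query string by hand, with %22 standing for the double quotes around the field names. For comparison, the standard-library construction (note net/url also escapes the brackets and commas, the stricter standard encoding):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("fields", `["bdstoken","token","uk"]`)
	fmt.Println("https://pan.baidu.com/api/gettemplatevariable?" + q.Encode())
}
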
"github.com/alist-org/alist/v3/internal/net" "github.com/go-resty/resty/v2" ) @@ -26,7 +27,7 @@ func InitClient() { NoRedirectClient.SetHeader("user-agent", UserAgent) RestyClient = NewRestyClient() - HttpClient = NewHttpClient() + HttpClient = net.NewHttpClient() } func NewRestyClient() *resty.Client { @@ -38,13 +39,3 @@ func NewRestyClient() *resty.Client { SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify}) return client } - -func NewHttpClient() *http.Client { - return &http.Client{ - Timeout: time.Hour * 48, - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify}, - }, - } -} diff --git a/drivers/chaoxing/driver.go b/drivers/chaoxing/driver.go index de122c36..bf01a83b 100644 --- a/drivers/chaoxing/driver.go +++ b/drivers/chaoxing/driver.go @@ -67,7 +67,9 @@ func (d *ChaoXing) Init(ctx context.Context) error { } func (d *ChaoXing) Drop(ctx context.Context) error { - d.cron.Stop() + if d.cron != nil { + d.cron.Stop() + } return nil } @@ -213,7 +215,7 @@ func (d *ChaoXing) Remove(ctx context.Context, obj model.Obj) error { return nil } -func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { var resp UploadDataRsp _, err := d.request("https://noteyd.chaoxing.com/pc/files/getUploadConfig", http.MethodGet, func(req *resty.Request) { }, &resp) @@ -225,11 +227,11 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS } body := &bytes.Buffer{} writer := multipart.NewWriter(body) - filePart, err := writer.CreateFormFile("file", stream.GetName()) + filePart, err := writer.CreateFormFile("file", file.GetName()) if err != nil { return err } - _, err = utils.CopyWithBuffer(filePart, stream) + _, err = utils.CopyWithBuffer(filePart, file) if err != nil { return err } @@ -246,7 +248,14 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS if err != nil { return err } - req, err := http.NewRequest("POST", "https://pan-yz.chaoxing.com/upload", body) + r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: &driver.SimpleReaderWithSize{ + Reader: body, + Size: int64(body.Len()), + }, + UpdateProgress: up, + }) + req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r) if err != nil { return err } diff --git a/drivers/cloudreve/driver.go b/drivers/cloudreve/driver.go index dc6d1b13..dcde58c6 100644 --- a/drivers/cloudreve/driver.go +++ b/drivers/cloudreve/driver.go @@ -4,11 +4,12 @@ import ( "context" "io" "net/http" - "strconv" + "path" "strings" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" @@ -17,6 +18,7 @@ import ( type Cloudreve struct { model.Storage Addition + ref *Cloudreve } func (d *Cloudreve) Config() driver.Config { @@ -36,8 +38,18 @@ func (d *Cloudreve) Init(ctx context.Context) error { return d.login() } +func (d *Cloudreve) InitReference(storage driver.Driver) error { + refStorage, ok := storage.(*Cloudreve) + if ok { + d.ref = refStorage + return nil + } + return errs.NotSupport +} + func (d *Cloudreve) Drop(ctx context.Context) error { d.Cookie = 
"" + d.ref = nil return nil } @@ -90,7 +102,7 @@ func (d *Cloudreve) MakeDir(ctx context.Context, parentDir model.Obj, dirName st func (d *Cloudreve) Move(ctx context.Context, srcObj, dstDir model.Obj) error { body := base.Json{ "action": "move", - "src_dir": srcObj.GetPath(), + "src_dir": path.Dir(srcObj.GetPath()), "dst": dstDir.GetPath(), "src": convertSrc(srcObj), } @@ -112,7 +124,7 @@ func (d *Cloudreve) Rename(ctx context.Context, srcObj model.Obj, newName string func (d *Cloudreve) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { body := base.Json{ - "src_dir": srcObj.GetPath(), + "src_dir": path.Dir(srcObj.GetPath()), "dst": dstDir.GetPath(), "src": convertSrc(srcObj), } @@ -133,6 +145,8 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File if io.ReadCloser(stream) == http.NoBody { return d.create(ctx, dstDir, stream) } + + // 获取存储策略 var r DirectoryResp err := d.request(http.MethodGet, "/directory"+dstDir.GetPath(), nil, &r) if err != nil { @@ -143,8 +157,10 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File "size": stream.GetSize(), "name": stream.GetName(), "policy_id": r.Policy.Id, - "last_modified": stream.ModTime().Unix(), + "last_modified": stream.ModTime().UnixMilli(), } + + // 获取上传会话信息 var u UploadInfo err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) { req.SetBody(uploadBody) @@ -152,36 +168,26 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File if err != nil { return err } - var chunkSize = u.ChunkSize - var buf []byte - var chunk int - for { - var n int - buf = make([]byte, chunkSize) - n, err = io.ReadAtLeast(stream, buf, chunkSize) - if err != nil && err != io.ErrUnexpectedEOF { - if err == io.EOF { - return nil - } - return err - } - - if n == 0 { - break - } - buf = buf[:n] - err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) { - req.SetHeader("Content-Type", "application/octet-stream") - req.SetHeader("Content-Length", strconv.Itoa(n)) - req.SetBody(buf) - }, nil) - if err != nil { - break - } - chunk++ + // 根据存储方式选择分片上传的方法 + switch r.Policy.Type { + case "onedrive": + err = d.upOneDrive(ctx, stream, u, up) + case "s3": + err = d.upS3(ctx, stream, u, up) + case "remote": // 从机存储 + err = d.upRemote(ctx, stream, u, up) + case "local": // 本机存储 + err = d.upLocal(ctx, stream, u, up) + default: + err = errs.NotImplement } - return err + if err != nil { + // 删除失败的会话 + _ = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil) + return err + } + return nil } func (d *Cloudreve) create(ctx context.Context, dir model.Obj, file model.Obj) error { diff --git a/drivers/cloudreve/types.go b/drivers/cloudreve/types.go index 241d993e..8a465f01 100644 --- a/drivers/cloudreve/types.go +++ b/drivers/cloudreve/types.go @@ -21,9 +21,12 @@ type Policy struct { } type UploadInfo struct { - SessionID string `json:"sessionID"` - ChunkSize int `json:"chunkSize"` - Expires int `json:"expires"` + SessionID string `json:"sessionID"` + ChunkSize int `json:"chunkSize"` + Expires int `json:"expires"` + UploadURLs []string `json:"uploadURLs"` + Credential string `json:"credential,omitempty"` // local + CompleteURL string `json:"completeURL,omitempty"` // s3 } type DirectoryResp struct { diff --git a/drivers/cloudreve/util.go b/drivers/cloudreve/util.go index 284e3289..5054de6c 100644 --- a/drivers/cloudreve/util.go +++ b/drivers/cloudreve/util.go @@ -1,18 +1,26 @@ package cloudreve import ( + "bytes" 
+ "context" "encoding/base64" + "encoding/json" "errors" + "fmt" + "io" "net/http" + "strconv" "strings" + "time" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/setting" "github.com/alist-org/alist/v3/pkg/cookie" + "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" - json "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go" ) @@ -20,17 +28,23 @@ import ( const loginPath = "/user/session" -func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error { - u := d.Address + "/api/v3" + path - ua := d.CustomUA - if ua == "" { - ua = base.UserAgent +func (d *Cloudreve) getUA() string { + if d.CustomUA != "" { + return d.CustomUA } + return base.UserAgent +} + +func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error { + if d.ref != nil { + return d.ref.request(method, path, callback, out) + } + u := d.Address + "/api/v3" + path req := base.RestyClient.R() req.SetHeaders(map[string]string{ "Cookie": "cloudreve-session=" + d.Cookie, "Accept": "application/json, text/plain, */*", - "User-Agent": ua, + "User-Agent": d.getUA(), }) var r Resp @@ -69,11 +83,11 @@ func (d *Cloudreve) request(method string, path string, callback base.ReqCallbac } if out != nil && r.Data != nil { var marshal []byte - marshal, err = json.Marshal(r.Data) + marshal, err = jsoniter.Marshal(r.Data) if err != nil { return err } - err = json.Unmarshal(marshal, out) + err = jsoniter.Unmarshal(marshal, out) if err != nil { return err } @@ -93,7 +107,7 @@ func (d *Cloudreve) login() error { if err == nil { break } - if err != nil && err.Error() != "CAPTCHA not match." { + if err.Error() != "CAPTCHA not match." 
{ break } } @@ -154,15 +168,11 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) { if !d.Addition.EnableThumbAndFolderSize { return model.Thumbnail{}, nil } - ua := d.CustomUA - if ua == "" { - ua = base.UserAgent - } req := base.NoRedirectClient.R() req.SetHeaders(map[string]string{ "Cookie": "cloudreve-session=" + d.Cookie, "Accept": "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8", - "User-Agent": ua, + "User-Agent": d.getUA(), }) resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id) if err != nil { @@ -172,3 +182,281 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) { Thumbnail: resp.Header().Get("Location"), }, nil } + +func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error { + var finish int64 = 0 + var chunk int = 0 + DEFAULT := int64(u.ChunkSize) + for finish < stream.GetSize() { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + left := stream.GetSize() - finish + byteSize := min(left, DEFAULT) + utils.Log.Debugf("[Cloudreve-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()) + byteData := make([]byte, byteSize) + n, err := io.ReadFull(stream, byteData) + utils.Log.Debug(err, n) + if err != nil { + return err + } + err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) { + req.SetHeader("Content-Type", "application/octet-stream") + req.SetContentLength(true) + req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10)) + req.SetHeader("User-Agent", d.getUA()) + req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) + req.AddRetryCondition(func(r *resty.Response, err error) bool { + if err != nil { + return true + } + if r.IsError() { + return true + } + var retryResp Resp + jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp) + if jErr != nil { + return true + } + if retryResp.Code != 0 { + return true + } + return false + }) + }, nil) + if err != nil { + return err + } + finish += byteSize + up(float64(finish) * 100 / float64(stream.GetSize())) + chunk++ + } + return nil +} + +func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error { + uploadUrl := u.UploadURLs[0] + credential := u.Credential + var finish int64 = 0 + var chunk int = 0 + DEFAULT := int64(u.ChunkSize) + retryCount := 0 + maxRetries := 3 + for finish < stream.GetSize() { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + left := stream.GetSize() - finish + byteSize := min(left, DEFAULT) + utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()) + byteData := make([]byte, byteSize) + n, err := io.ReadFull(stream, byteData) + utils.Log.Debug(err, n) + if err != nil { + return err + } + req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), + driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) + if err != nil { + return err + } + req = req.WithContext(ctx) + req.ContentLength = byteSize + // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) + req.Header.Set("Authorization", fmt.Sprint(credential)) + req.Header.Set("User-Agent", d.getUA()) + err = func() error { + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return errors.New(res.Status) + } + body, err := io.ReadAll(res.Body) + if err != nil { + return err + } + var up 
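
The retry condition registered per chunk treats everything short of a clean code-0 envelope as retryable. Factored out as a predicate (Resp and base.RestyClient as used above):

// shouldRetryChunk mirrors the AddRetryCondition closure in upLocal.
func shouldRetryChunk(r *resty.Response, err error) bool {
	if err != nil || r.IsError() { // transport or HTTP-level failure
		return true
	}
	var body Resp
	if jErr := base.RestyClient.JSONUnmarshal(r.Body(), &body); jErr != nil {
		return true // not the JSON envelope we expect
	}
	return body.Code != 0 // business-level failure
}
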
Resp + err = json.Unmarshal(body, &up) + if err != nil { + return err + } + if up.Code != 0 { + return errors.New(up.Msg) + } + return nil + }() + if err == nil { + retryCount = 0 + finish += byteSize + up(float64(finish) * 100 / float64(stream.GetSize())) + chunk++ + } else { + retryCount++ + if retryCount > maxRetries { + return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err) + } + backoff := time.Duration(1<= 500 && res.StatusCode <= 504: + retryCount++ + if retryCount > maxRetries { + res.Body.Close() + return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) + } + backoff := time.Duration(1< maxRetries { + return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) + } + backoff := time.Duration(1<") + for i, etag := range etags { + bodyBuilder.WriteString(fmt.Sprintf( + `%d%s`, + i+1, // PartNumber 从 1 开始 + etag, + )) + } + bodyBuilder.WriteString("") + req, err := http.NewRequest( + "POST", + u.CompleteURL, + strings.NewReader(bodyBuilder.String()), + ) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/xml") + req.Header.Set("User-Agent", d.getUA()) + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body)) + } + + // 上传成功发送回调请求 + err = d.request(http.MethodGet, "/callback/s3/"+u.SessionID, nil, nil) + if err != nil { + return err + } + return nil +} diff --git a/drivers/cloudreve_v4/driver.go b/drivers/cloudreve_v4/driver.go new file mode 100644 index 00000000..32a22f62 --- /dev/null +++ b/drivers/cloudreve_v4/driver.go @@ -0,0 +1,305 @@ +package cloudreve_v4 + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" + "time" + + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/go-resty/resty/v2" +) + +type CloudreveV4 struct { + model.Storage + Addition + ref *CloudreveV4 +} + +func (d *CloudreveV4) Config() driver.Config { + if d.ref != nil { + return d.ref.Config() + } + if d.EnableVersionUpload { + config.NoOverwriteUpload = false + } + return config +} + +func (d *CloudreveV4) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *CloudreveV4) Init(ctx context.Context) error { + // removing trailing slash + d.Address = strings.TrimSuffix(d.Address, "/") + op.MustSaveDriverStorage(d) + if d.ref != nil { + return nil + } + if d.AccessToken == "" && d.RefreshToken != "" { + return d.refreshToken() + } + if d.Username != "" { + return d.login() + } + return nil +} + +func (d *CloudreveV4) InitReference(storage driver.Driver) error { + refStorage, ok := storage.(*CloudreveV4) + if ok { + d.ref = refStorage + return nil + } + return errs.NotSupport +} + +func (d *CloudreveV4) Drop(ctx context.Context) error { + d.ref = nil + return nil +} + +func (d *CloudreveV4) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + const pageSize int = 100 + var f []File + var r FileResp + params := map[string]string{ + "page_size": strconv.Itoa(pageSize), + "uri": dir.GetPath(), + "order_by": d.OrderBy, + "order_direction": 
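
upS3 closes the multipart upload by POSTing S3's standard CompleteMultipartUpload document to the presigned CompleteURL, one Part element per collected ETag with 1-based part numbers. A sketch of the body construction (standard S3 schema; the helper name is illustrative):

// buildCompleteMultipartXML assembles the S3 completion body that upS3
// writes through its strings.Builder; part numbers start at 1.
func buildCompleteMultipartXML(etags []string) string {
	var b strings.Builder
	b.WriteString("<CompleteMultipartUpload>")
	for i, etag := range etags {
		b.WriteString(fmt.Sprintf(
			"<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>",
			i+1, etag))
	}
	b.WriteString("</CompleteMultipartUpload>")
	return b.String()
}
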
d.OrderDirection, + "page": "0", + } + + for { + err := d.request(http.MethodGet, "/file", func(req *resty.Request) { + req.SetQueryParams(params) + }, &r) + if err != nil { + return nil, err + } + f = append(f, r.Files...) + if r.Pagination.NextToken == "" || len(r.Files) < pageSize { + break + } + params["next_page_token"] = r.Pagination.NextToken + } + + return utils.SliceConvert(f, func(src File) (model.Obj, error) { + if d.EnableFolderSize && src.Type == 1 { + var ds FolderSummaryResp + err := d.request(http.MethodGet, "/file/info", func(req *resty.Request) { + req.SetQueryParam("uri", src.Path) + req.SetQueryParam("folder_summary", "true") + }, &ds) + if err == nil && ds.FolderSummary.Size > 0 { + src.Size = ds.FolderSummary.Size + } + } + var thumb model.Thumbnail + if d.EnableThumb && src.Type == 0 { + var t FileThumbResp + err := d.request(http.MethodGet, "/file/thumb", func(req *resty.Request) { + req.SetQueryParam("uri", src.Path) + }, &t) + if err == nil && t.URL != "" { + thumb = model.Thumbnail{ + Thumbnail: t.URL, + } + } + } + return &model.ObjThumb{ + Object: model.Object{ + ID: src.ID, + Path: src.Path, + Name: src.Name, + Size: src.Size, + Modified: src.UpdatedAt, + Ctime: src.CreatedAt, + IsFolder: src.Type == 1, + }, + Thumbnail: thumb, + }, nil + }) +} + +func (d *CloudreveV4) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + var url FileUrlResp + err := d.request(http.MethodPost, "/file/url", func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{file.GetPath()}, + "download": true, + }) + }, &url) + if err != nil { + return nil, err + } + if len(url.Urls) == 0 { + return nil, errors.New("server returns no url") + } + exp := time.Until(url.Expires) + return &model.Link{ + URL: url.Urls[0].URL, + Expiration: &exp, + }, nil +} + +func (d *CloudreveV4) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + return d.request(http.MethodPost, "/file/create", func(req *resty.Request) { + req.SetBody(base.Json{ + "type": "folder", + "uri": parentDir.GetPath() + "/" + dirName, + "error_on_conflict": true, + }) + }, nil) +} + +func (d *CloudreveV4) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + return d.request(http.MethodPost, "/file/move", func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{srcObj.GetPath()}, + "dst": dstDir.GetPath(), + "copy": false, + }) + }, nil) +} + +func (d *CloudreveV4) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + return d.request(http.MethodPost, "/file/create", func(req *resty.Request) { + req.SetBody(base.Json{ + "new_name": newName, + "uri": srcObj.GetPath(), + }) + }, nil) + +} + +func (d *CloudreveV4) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + return d.request(http.MethodPost, "/file/move", func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{srcObj.GetPath()}, + "dst": dstDir.GetPath(), + "copy": true, + }) + }, nil) +} + +func (d *CloudreveV4) Remove(ctx context.Context, obj model.Obj) error { + return d.request(http.MethodDelete, "/file", func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{obj.GetPath()}, + "unlink": false, + "skip_soft_delete": true, + }) + }, nil) +} + +func (d *CloudreveV4) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + if file.GetSize() == 0 { + // 空文件使用新建文件方法,避免上传卡锁 + return d.request(http.MethodPost, "/file/create", func(req *resty.Request) { + req.SetBody(base.Json{ + 
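
List walks the v4 cursor API: fetch a page, append, follow pagination.next_token, and stop on an empty token (the driver also stops early when a page comes back short). The loop reduced to its shape:

// collectAll drains a cursor-paginated endpoint; fetch is any page-fetcher
// returning one page of items plus the next cursor ("" when exhausted).
func collectAll[T any](fetch func(cursor string) (items []T, next string, err error)) ([]T, error) {
	var all []T
	cursor := ""
	for {
		items, next, err := fetch(cursor)
		if err != nil {
			return nil, err
		}
		all = append(all, items...)
		if next == "" {
			break
		}
		cursor = next
	}
	return all, nil
}
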
"type": "file", + "uri": dstDir.GetPath() + "/" + file.GetName(), + "error_on_conflict": true, + }) + }, nil) + } + var p StoragePolicy + var r FileResp + var u FileUploadResp + var err error + params := map[string]string{ + "page_size": "10", + "uri": dstDir.GetPath(), + "order_by": "created_at", + "order_direction": "asc", + "page": "0", + } + err = d.request(http.MethodGet, "/file", func(req *resty.Request) { + req.SetQueryParams(params) + }, &r) + if err != nil { + return err + } + p = r.StoragePolicy + body := base.Json{ + "uri": dstDir.GetPath() + "/" + file.GetName(), + "size": file.GetSize(), + "policy_id": p.ID, + "last_modified": file.ModTime().UnixMilli(), + "mime_type": "", + } + if d.EnableVersionUpload { + body["entity_type"] = "version" + } + err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) { + req.SetBody(body) + }, &u) + if err != nil { + return err + } + if u.StoragePolicy.Relay { + err = d.upLocal(ctx, file, u, up) + } else { + switch u.StoragePolicy.Type { + case "local": + err = d.upLocal(ctx, file, u, up) + case "remote": + err = d.upRemote(ctx, file, u, up) + case "onedrive": + err = d.upOneDrive(ctx, file, u, up) + case "s3": + err = d.upS3(ctx, file, u, up) + default: + return errs.NotImplement + } + } + if err != nil { + // 删除失败的会话 + _ = d.request(http.MethodDelete, "/file/upload", func(req *resty.Request) { + req.SetBody(base.Json{ + "id": u.SessionID, + "uri": u.URI, + }) + }, nil) + return err + } + return nil +} + +func (d *CloudreveV4) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *CloudreveV4) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { + // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *CloudreveV4) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) { + // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional + // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir + // return errs.NotImplement to use an internal archive tool + return nil, errs.NotImplement +} + +//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { +// return nil, errs.NotSupport +//} + +var _ driver.Driver = (*CloudreveV4)(nil) diff --git a/drivers/cloudreve_v4/meta.go b/drivers/cloudreve_v4/meta.go new file mode 100644 index 00000000..bfaa14f8 --- /dev/null +++ b/drivers/cloudreve_v4/meta.go @@ -0,0 +1,44 @@ +package cloudreve_v4 + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + // Usually one of two + driver.RootPath + // driver.RootID + // define other + Address string `json:"address" required:"true"` + Username string `json:"username"` + Password string `json:"password"` + AccessToken string 
`json:"access_token"` + RefreshToken string `json:"refresh_token"` + CustomUA string `json:"custom_ua"` + EnableFolderSize bool `json:"enable_folder_size"` + EnableThumb bool `json:"enable_thumb"` + EnableVersionUpload bool `json:"enable_version_upload"` + OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at" default:"name" required:"true"` + OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc" required:"true"` +} + +var config = driver.Config{ + Name: "Cloudreve V4", + LocalSort: false, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: false, + NeedMs: false, + DefaultRoot: "cloudreve://my", + CheckStatus: true, + Alert: "", + NoOverwriteUpload: true, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &CloudreveV4{} + }) +} diff --git a/drivers/cloudreve_v4/types.go b/drivers/cloudreve_v4/types.go new file mode 100644 index 00000000..e81226d3 --- /dev/null +++ b/drivers/cloudreve_v4/types.go @@ -0,0 +1,164 @@ +package cloudreve_v4 + +import ( + "time" + + "github.com/alist-org/alist/v3/internal/model" +) + +type Object struct { + model.Object + StoragePolicy StoragePolicy +} + +type Resp struct { + Code int `json:"code"` + Msg string `json:"msg"` + Data any `json:"data"` +} + +type BasicConfigResp struct { + InstanceID string `json:"instance_id"` + // Title string `json:"title"` + // Themes string `json:"themes"` + // DefaultTheme string `json:"default_theme"` + User struct { + ID string `json:"id"` + // Nickname string `json:"nickname"` + // CreatedAt time.Time `json:"created_at"` + // Anonymous bool `json:"anonymous"` + Group struct { + ID string `json:"id"` + Name string `json:"name"` + Permission string `json:"permission"` + } `json:"group"` + } `json:"user"` + // Logo string `json:"logo"` + // LogoLight string `json:"logo_light"` + // CaptchaReCaptchaKey string `json:"captcha_ReCaptchaKey"` + CaptchaType string `json:"captcha_type"` // support 'normal' only + // AppPromotion bool `json:"app_promotion"` +} + +type SiteLoginConfigResp struct { + LoginCaptcha bool `json:"login_captcha"` + Authn bool `json:"authn"` +} + +type PrepareLoginResp struct { + WebauthnEnabled bool `json:"webauthn_enabled"` + PasswordEnabled bool `json:"password_enabled"` +} + +type CaptchaResp struct { + Image string `json:"image"` + Ticket string `json:"ticket"` +} + +type Token struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + AccessExpires time.Time `json:"access_expires"` + RefreshExpires time.Time `json:"refresh_expires"` +} + +type TokenResponse struct { + User struct { + ID string `json:"id"` + // Email string `json:"email"` + // Nickname string `json:"nickname"` + Status string `json:"status"` + // CreatedAt time.Time `json:"created_at"` + Group struct { + ID string `json:"id"` + Name string `json:"name"` + Permission string `json:"permission"` + // DirectLinkBatchSize int `json:"direct_link_batch_size"` + // TrashRetention int `json:"trash_retention"` + } `json:"group"` + // Language string `json:"language"` + } `json:"user"` + Token Token `json:"token"` +} + +type File struct { + Type int `json:"type"` // 0: file, 1: folder + ID string `json:"id"` + Name string `json:"name"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Size int64 `json:"size"` + Metadata interface{} `json:"metadata"` + Path string `json:"path"` + Capability string `json:"capability"` + Owned bool `json:"owned"` + PrimaryEntity string 
`json:"primary_entity"` +} + +type StoragePolicy struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + MaxSize int64 `json:"max_size"` + Relay bool `json:"relay,omitempty"` +} + +type Pagination struct { + Page int `json:"page"` + PageSize int `json:"page_size"` + IsCursor bool `json:"is_cursor"` + NextToken string `json:"next_token,omitempty"` +} + +type Props struct { + Capability string `json:"capability"` + MaxPageSize int `json:"max_page_size"` + OrderByOptions []string `json:"order_by_options"` + OrderDirectionOptions []string `json:"order_direction_options"` +} + +type FileResp struct { + Files []File `json:"files"` + Parent File `json:"parent"` + Pagination Pagination `json:"pagination"` + Props Props `json:"props"` + ContextHint string `json:"context_hint"` + MixedType bool `json:"mixed_type"` + StoragePolicy StoragePolicy `json:"storage_policy"` +} + +type FileUrlResp struct { + Urls []struct { + URL string `json:"url"` + } `json:"urls"` + Expires time.Time `json:"expires"` +} + +type FileUploadResp struct { + // UploadID string `json:"upload_id"` + SessionID string `json:"session_id"` + ChunkSize int64 `json:"chunk_size"` + Expires int64 `json:"expires"` + StoragePolicy StoragePolicy `json:"storage_policy"` + URI string `json:"uri"` + CompleteURL string `json:"completeURL,omitempty"` // for S3-like + CallbackSecret string `json:"callback_secret,omitempty"` // for S3-like, OneDrive + UploadUrls []string `json:"upload_urls,omitempty"` // for not-local + Credential string `json:"credential,omitempty"` // for local +} + +type FileThumbResp struct { + URL string `json:"url"` + Expires time.Time `json:"expires"` +} + +type FolderSummaryResp struct { + File + FolderSummary struct { + Size int64 `json:"size"` + Files int64 `json:"files"` + Folders int64 `json:"folders"` + Completed bool `json:"completed"` + CalculatedAt time.Time `json:"calculated_at"` + } `json:"folder_summary"` +} diff --git a/drivers/cloudreve_v4/util.go b/drivers/cloudreve_v4/util.go new file mode 100644 index 00000000..cf2337f2 --- /dev/null +++ b/drivers/cloudreve_v4/util.go @@ -0,0 +1,476 @@ +package cloudreve_v4 + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/go-resty/resty/v2" + jsoniter "github.com/json-iterator/go" +) + +// do others that not defined in Driver interface + +func (d *CloudreveV4) getUA() string { + if d.CustomUA != "" { + return d.CustomUA + } + return base.UserAgent +} + +func (d *CloudreveV4) request(method string, path string, callback base.ReqCallback, out any) error { + if d.ref != nil { + return d.ref.request(method, path, callback, out) + } + u := d.Address + "/api/v4" + path + req := base.RestyClient.R() + req.SetHeaders(map[string]string{ + "Accept": "application/json, text/plain, */*", + "User-Agent": d.getUA(), + }) + if d.AccessToken != "" { + req.SetHeader("Authorization", "Bearer "+d.AccessToken) + } + + var r Resp + req.SetResult(&r) + + if callback != nil { + callback(req) + } + + resp, err := req.Execute(method, u) + if err != nil { + return err + } + if !resp.IsSuccess() { + return 
errors.New(resp.String()) + } + + if r.Code != 0 { + if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" { + // try to refresh token + err = d.refreshToken() + if err != nil { + return err + } + return d.request(method, path, callback, out) + } + return errors.New(r.Msg) + } + + if out != nil && r.Data != nil { + var marshal []byte + marshal, err = json.Marshal(r.Data) + if err != nil { + return err + } + err = json.Unmarshal(marshal, out) + if err != nil { + return err + } + } + + return nil +} + +func (d *CloudreveV4) login() error { + var siteConfig SiteLoginConfigResp + err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig) + if err != nil { + return err + } + if !siteConfig.Authn { + return errors.New("authn not support") + } + var prepareLogin PrepareLoginResp + err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin) + if err != nil { + return err + } + if !prepareLogin.PasswordEnabled { + return errors.New("password not enabled") + } + if prepareLogin.WebauthnEnabled { + return errors.New("webauthn not support") + } + for range 5 { + err = d.doLogin(siteConfig.LoginCaptcha) + if err == nil { + break + } + if err.Error() != "CAPTCHA not match." { + break + } + } + return err +} + +func (d *CloudreveV4) doLogin(needCaptcha bool) error { + var err error + loginBody := base.Json{ + "email": d.Username, + "password": d.Password, + } + if needCaptcha { + var config BasicConfigResp + err = d.request(http.MethodGet, "/site/config/basic", nil, &config) + if err != nil { + return err + } + if config.CaptchaType != "normal" { + return fmt.Errorf("captcha type %s not support", config.CaptchaType) + } + var captcha CaptchaResp + err = d.request(http.MethodGet, "/site/captcha", nil, &captcha) + if err != nil { + return err + } + if !strings.HasPrefix(captcha.Image, "data:image/png;base64,") { + return errors.New("can not get captcha") + } + loginBody["ticket"] = captcha.Ticket + i := strings.Index(captcha.Image, ",") + dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(captcha.Image[i+1:])) + vRes, err := base.RestyClient.R().SetMultipartField( + "image", "validateCode.png", "image/png", dec). 
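
doLogin strips the data-URI prefix from the captcha image and streams the base64 payload through a decoder rather than buffering a decoded copy. The same technique standalone:

package main

import (
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

func main() {
	dataURI := "data:image/png;base64,aGVsbG8=" // illustrative payload
	i := strings.Index(dataURI, ",")
	dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(dataURI[i+1:]))
	raw, _ := io.ReadAll(dec)
	fmt.Printf("%d bytes decoded: %q\n", len(raw), raw) // 5 bytes: "hello"
}
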
+ Post(setting.GetStr(conf.OcrApi)) + if err != nil { + return err + } + if jsoniter.Get(vRes.Body(), "status").ToInt() != 200 { + return errors.New("ocr error:" + jsoniter.Get(vRes.Body(), "msg").ToString()) + } + captchaCode := jsoniter.Get(vRes.Body(), "result").ToString() + if captchaCode == "" { + return errors.New("ocr error: empty result") + } + loginBody["captcha"] = captchaCode + } + var token TokenResponse + err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) { + req.SetBody(loginBody) + }, &token) + if err != nil { + return err + } + d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken + op.MustSaveDriverStorage(d) + return nil +} + +func (d *CloudreveV4) refreshToken() error { + var token Token + if token.RefreshToken == "" { + if d.Username != "" { + err := d.login() + if err != nil { + return fmt.Errorf("cannot login to get refresh token, error: %s", err) + } + } + return nil + } + err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) { + req.SetBody(base.Json{ + "refresh_token": d.RefreshToken, + }) + }, &token) + if err != nil { + return err + } + d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken + op.MustSaveDriverStorage(d) + return nil +} + +func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error { + var finish int64 = 0 + var chunk int = 0 + DEFAULT := int64(u.ChunkSize) + if DEFAULT == 0 { + // support relay + DEFAULT = file.GetSize() + } + for finish < file.GetSize() { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + left := file.GetSize() - finish + byteSize := min(left, DEFAULT) + utils.Log.Debugf("[CloudreveV4-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize()) + byteData := make([]byte, byteSize) + n, err := io.ReadFull(file, byteData) + utils.Log.Debug(err, n) + if err != nil { + return err + } + err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) { + req.SetHeader("Content-Type", "application/octet-stream") + req.SetContentLength(true) + req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10)) + req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) + req.AddRetryCondition(func(r *resty.Response, err error) bool { + if err != nil { + return true + } + if r.IsError() { + return true + } + var retryResp Resp + jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp) + if jErr != nil { + return true + } + if retryResp.Code != 0 { + return true + } + return false + }) + }, nil) + if err != nil { + return err + } + finish += byteSize + up(float64(finish) * 100 / float64(file.GetSize())) + chunk++ + } + return nil +} + +func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error { + uploadUrl := u.UploadUrls[0] + credential := u.Credential + var finish int64 = 0 + var chunk int = 0 + DEFAULT := int64(u.ChunkSize) + retryCount := 0 + maxRetries := 3 + for finish < file.GetSize() { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + left := file.GetSize() - finish + byteSize := min(left, DEFAULT) + utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize()) + byteData := make([]byte, byteSize) + n, err := io.ReadFull(file, byteData) + utils.Log.Debug(err, n) + if err != nil { + return err + } + req, err := http.NewRequest("POST", 
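
Note that refreshToken above tests the just-declared token.RefreshToken, which is always empty, so the POST to /session/token/refresh is unreachable and every expiry falls back to password login; consulting the stored d.RefreshToken appears to be the intent. A hedged sketch of that variant:

// refreshTokenSketch: guard on the driver's stored refresh token, falling
// back to password login only when none is configured.
func (d *CloudreveV4) refreshTokenSketch() error {
	if d.RefreshToken == "" {
		if d.Username != "" {
			return d.login()
		}
		return nil
	}
	var token Token
	err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
		req.SetBody(base.Json{"refresh_token": d.RefreshToken})
	}, &token)
	if err != nil {
		return err
	}
	d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
	op.MustSaveDriverStorage(d)
	return nil
}
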
uploadUrl+"?chunk="+strconv.Itoa(chunk), + driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) + if err != nil { + return err + } + req = req.WithContext(ctx) + req.ContentLength = byteSize + // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) + req.Header.Set("Authorization", fmt.Sprint(credential)) + req.Header.Set("User-Agent", d.getUA()) + err = func() error { + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return errors.New(res.Status) + } + body, err := io.ReadAll(res.Body) + if err != nil { + return err + } + var up Resp + err = json.Unmarshal(body, &up) + if err != nil { + return err + } + if up.Code != 0 { + return errors.New(up.Msg) + } + return nil + }() + if err == nil { + retryCount = 0 + finish += byteSize + up(float64(finish) * 100 / float64(file.GetSize())) + chunk++ + } else { + retryCount++ + if retryCount > maxRetries { + return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err) + } + backoff := time.Duration(1<= 500 && res.StatusCode <= 504: + retryCount++ + if retryCount > maxRetries { + res.Body.Close() + return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) + } + backoff := time.Duration(1< maxRetries { + return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries) + } + backoff := time.Duration(1<") + for i, etag := range etags { + bodyBuilder.WriteString(fmt.Sprintf( + `%d%s`, + i+1, // PartNumber 从 1 开始 + etag, + )) + } + bodyBuilder.WriteString("") + req, err := http.NewRequest( + "POST", + u.CompleteURL, + strings.NewReader(bodyBuilder.String()), + ) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/xml") + req.Header.Set("User-Agent", d.getUA()) + res, err := base.HttpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + body, _ := io.ReadAll(res.Body) + return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body)) + } + + // 上传成功发送回调请求 + return d.request(http.MethodPost, "/callback/s3/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) { + req.SetBody("{}") + }, nil) +} diff --git a/drivers/crypt/driver.go b/drivers/crypt/driver.go index b0325db4..2330fb97 100644 --- a/drivers/crypt/driver.go +++ b/drivers/crypt/driver.go @@ -13,6 +13,7 @@ import ( "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" @@ -160,7 +161,11 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([ // discarding hash as it's encrypted } if d.Thumbnail && thumb == "" { - thumb = utils.EncodePath(common.GetApiUrl(nil)+stdpath.Join("/d", args.ReqPath, ".thumbnails", name+".webp"), true) + thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp") + thumb = fmt.Sprintf("%s/d%s?sign=%s", + common.GetApiUrl(common.GetHttpReq(ctx)), + utils.EncodePath(thumbPath, true), + sign.Sign(thumbPath)) } if !ok && !d.Thumbnail { result = append(result, &objRes) @@ -258,19 +263,13 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( } rrc := remoteLink.RangeReadCloser if len(remoteLink.URL) > 0 { - - rangedRemoteLink := &model.Link{ - 
URL:    remoteLink.URL,
-			Header: remoteLink.Header,
-		}
-		var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
+		var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, remoteLink)
 		if err != nil {
 			return nil, err
 		}
 		rrc = converted
 	}
 	if rrc != nil {
-		//remoteRangeReader, err :=
 		remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
 		remoteClosers.AddClosers(rrc.GetClosers())
 		if err != nil {
@@ -283,7 +282,6 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 			if err != nil {
 				return nil, err
 			}
-			//remoteClosers.Add(remoteLink.MFile)
 			//keep reuse same MFile and close at last.
 			remoteClosers.Add(remoteLink.MFile)
 			return io.NopCloser(remoteLink.MFile), nil
@@ -302,7 +300,6 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 	resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
 	resultLink := &model.Link{
-		Header:          remoteLink.Header,
 		RangeReadCloser: resultRangeReadCloser,
 		Expiration:      remoteLink.Expiration,
 	}
diff --git a/drivers/doubao/driver.go b/drivers/doubao/driver.go
new file mode 100644
index 00000000..0d421946
--- /dev/null
+++ b/drivers/doubao/driver.go
@@ -0,0 +1,271 @@
+package doubao
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/errs"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
+	"github.com/google/uuid"
+)
+
+type Doubao struct {
+	model.Storage
+	Addition
+	*UploadToken
+	UserId       string
+	uploadThread int
+}
+
+func (d *Doubao) Config() driver.Config {
+	return config
+}
+
+func (d *Doubao) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *Doubao) Init(ctx context.Context) error {
+	// TODO login / refresh token
+	//op.MustSaveDriverStorage(d)
+	uploadThread, err := strconv.Atoi(d.UploadThread)
+	if err != nil || uploadThread < 1 {
+		d.uploadThread, d.UploadThread = 3, "3" // Set default value
+	} else {
+		d.uploadThread = uploadThread
+	}
+
+	if d.UserId == "" {
+		userInfo, err := d.getUserInfo()
+		if err != nil {
+			return err
+		}
+
+		d.UserId = strconv.FormatInt(userInfo.UserID, 10)
+	}
+
+	if d.UploadToken == nil {
+		uploadToken, err := d.initUploadToken()
+		if err != nil {
+			return err
+		}
+
+		d.UploadToken = uploadToken
+	}
+
+	return nil
+}
+
+func (d *Doubao) Drop(ctx context.Context) error {
+	return nil
+}
+
+func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	var files []model.Obj
+	fileList, err := d.getFiles(dir.GetID(), "")
+	if err != nil {
+		return nil, err
+	}
+
+	for _, child := range fileList {
+		files = append(files, &Object{
+			Object: model.Object{
+				ID:       child.ID,
+				Path:     child.ParentID,
+				Name:     child.Name,
+				Size:     child.Size,
+				Modified: time.Unix(child.UpdateTime, 0),
+				Ctime:    time.Unix(child.CreateTime, 0),
+				IsFolder: child.NodeType == 1,
+			},
+			Key:      child.Key,
+			NodeType: child.NodeType,
+		})
+	}
+
+	return files, nil
+}
+
+func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	var downloadUrl string
+
+	if u, ok := file.(*Object); ok {
+		switch d.DownloadApi {
+		case "get_download_info":
+			var r GetDownloadInfoResp
+			_, err := d.request("/samantha/aispace/get_download_info",
http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "requests": []base.Json{{"node_id": file.GetID()}}, + }) + }, &r) + if err != nil { + return nil, err + } + + downloadUrl = r.Data.DownloadInfos[0].MainURL + case "get_file_url": + switch u.NodeType { + case VideoType, AudioType: + var r GetVideoFileUrlResp + _, err := d.request("/samantha/media/get_play_info", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "key": u.Key, + "node_id": file.GetID(), + }) + }, &r) + if err != nil { + return nil, err + } + + downloadUrl = r.Data.OriginalMediaInfo.MainURL + default: + var r GetFileUrlResp + _, err := d.request("/alice/message/get_file_url", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{u.Key}, + "type": FileNodeType[u.NodeType], + }) + }, &r) + if err != nil { + return nil, err + } + + downloadUrl = r.Data.FileUrls[0].MainURL + } + default: + return nil, errs.NotImplement + } + + // 生成标准的Content-Disposition + contentDisposition := generateContentDisposition(u.Name) + + return &model.Link{ + URL: downloadUrl, + Header: http.Header{ + "User-Agent": []string{UserAgent}, + "Content-Disposition": []string{contentDisposition}, + }, + }, nil + } + + return nil, errors.New("can't convert obj to URL") +} + +func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + var r UploadNodeResp + _, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "node_list": []base.Json{ + { + "local_id": uuid.New().String(), + "name": dirName, + "parent_id": parentDir.GetID(), + "node_type": 1, + }, + }, + }) + }, &r) + return err +} + +func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + var r UploadNodeResp + _, err := d.request("/samantha/aispace/move_node", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "node_list": []base.Json{ + {"id": srcObj.GetID()}, + }, + "current_parent_id": srcObj.GetPath(), + "target_parent_id": dstDir.GetID(), + }) + }, &r) + return err +} + +func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + var r BaseResp + _, err := d.request("/samantha/aispace/rename_node", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "node_id": srcObj.GetID(), + "node_name": newName, + }) + }, &r) + return err +} + +func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + // TODO copy obj, optional + return nil, errs.NotImplement +} + +func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error { + var r BaseResp + _, err := d.request("/samantha/aispace/delete_node", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}}) + }, &r) + return err +} + +func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + // 根据MIME类型确定数据类型 + mimetype := file.GetMimetype() + dataType := FileDataType + + switch { + case strings.HasPrefix(mimetype, "video/"): + dataType = VideoDataType + case strings.HasPrefix(mimetype, "audio/"): + dataType = VideoDataType // 音频与视频使用相同的处理方式 + case strings.HasPrefix(mimetype, "image/"): + dataType = ImgDataType + } + + // 获取上传配置 + uploadConfig := UploadConfig{} + if err := d.getUploadConfig(&uploadConfig, dataType, file); err != nil { + return nil, err + } + + // 根据文件大小选择上传方式 + if file.GetSize() <= 1*utils.MB { // 小于1MB,使用普通模式上传 + 
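		// The 1 MB cutoff appears to be a driver-side heuristic (nothing in
		// the patch ties it to a documented Doubao limit): small bodies are
		// cheaper to send in one request than to pay the init/transfer/finish
		// round-trips of the multipart protocol. For the multipart branch,
		// UploadByMultipart derives the part count by ceiling division, so
		// with the 5 MB DefaultChunkSize a 12 MB file becomes 3 parts
		// (5 + 5 + 2 MB):
		//
		//	totalParts := (fileSize + chunkSize - 1) / chunkSize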
return d.Upload(&uploadConfig, dstDir, file, up, dataType) + } + // 大文件使用分片上传 + return d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType) +} + +func (d *Doubao) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Doubao) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { + // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Doubao) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Doubao) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) { + // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional + // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir + // return errs.NotImplement to use an internal archive tool + return nil, errs.NotImplement +} + +//func (d *Doubao) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { +// return nil, errs.NotSupport +//} + +var _ driver.Driver = (*Doubao)(nil) diff --git a/drivers/doubao/meta.go b/drivers/doubao/meta.go new file mode 100644 index 00000000..7735e5ff --- /dev/null +++ b/drivers/doubao/meta.go @@ -0,0 +1,36 @@ +package doubao + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + // Usually one of two + // driver.RootPath + driver.RootID + // define other + Cookie string `json:"cookie" type:"text"` + UploadThread string `json:"upload_thread" default:"3"` + DownloadApi string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"` +} + +var config = driver.Config{ + Name: "Doubao", + LocalSort: true, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: false, + NeedMs: false, + DefaultRoot: "0", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &Doubao{} + }) +} diff --git a/drivers/doubao/types.go b/drivers/doubao/types.go new file mode 100644 index 00000000..ae747f88 --- /dev/null +++ b/drivers/doubao/types.go @@ -0,0 +1,415 @@ +package doubao + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/alist-org/alist/v3/internal/model" +) + +type BaseResp struct { + Code int `json:"code"` + Msg string `json:"msg"` +} + +type NodeInfoResp struct { + BaseResp + Data struct { + NodeInfo File `json:"node_info"` + Children []File `json:"children"` + NextCursor string `json:"next_cursor"` + HasMore bool `json:"has_more"` + } `json:"data"` +} + +type File struct { + ID string `json:"id"` + Name string `json:"name"` + Key string `json:"key"` + NodeType int `json:"node_type"` // 0: 文件, 1: 文件夹 + Size int64 `json:"size"` + Source int `json:"source"` + NameReviewStatus int `json:"name_review_status"` + ContentReviewStatus int `json:"content_review_status"` + RiskReviewStatus int `json:"risk_review_status"` + ConversationID 
string `json:"conversation_id"` + ParentID string `json:"parent_id"` + CreateTime int64 `json:"create_time"` + UpdateTime int64 `json:"update_time"` +} + +type GetDownloadInfoResp struct { + BaseResp + Data struct { + DownloadInfos []struct { + NodeID string `json:"node_id"` + MainURL string `json:"main_url"` + BackupURL string `json:"backup_url"` + } `json:"download_infos"` + } `json:"data"` +} + +type GetFileUrlResp struct { + BaseResp + Data struct { + FileUrls []struct { + URI string `json:"uri"` + MainURL string `json:"main_url"` + BackURL string `json:"back_url"` + } `json:"file_urls"` + } `json:"data"` +} + +type GetVideoFileUrlResp struct { + BaseResp + Data struct { + MediaType string `json:"media_type"` + MediaInfo []struct { + Meta struct { + Height string `json:"height"` + Width string `json:"width"` + Format string `json:"format"` + Duration float64 `json:"duration"` + CodecType string `json:"codec_type"` + Definition string `json:"definition"` + } `json:"meta"` + MainURL string `json:"main_url"` + BackupURL string `json:"backup_url"` + } `json:"media_info"` + OriginalMediaInfo struct { + Meta struct { + Height string `json:"height"` + Width string `json:"width"` + Format string `json:"format"` + Duration float64 `json:"duration"` + CodecType string `json:"codec_type"` + Definition string `json:"definition"` + } `json:"meta"` + MainURL string `json:"main_url"` + BackupURL string `json:"backup_url"` + } `json:"original_media_info"` + PosterURL string `json:"poster_url"` + PlayableStatus int `json:"playable_status"` + } `json:"data"` +} + +type UploadNodeResp struct { + BaseResp + Data struct { + NodeList []struct { + LocalID string `json:"local_id"` + ID string `json:"id"` + ParentID string `json:"parent_id"` + Name string `json:"name"` + Key string `json:"key"` + NodeType int `json:"node_type"` // 0: 文件, 1: 文件夹 + } `json:"node_list"` + } `json:"data"` +} + +type Object struct { + model.Object + Key string + NodeType int +} + +type UserInfoResp struct { + Data UserInfo `json:"data"` + Message string `json:"message"` +} +type AppUserInfo struct { + BuiAuditInfo string `json:"bui_audit_info"` +} +type AuditInfo struct { +} +type Details struct { +} +type BuiAuditInfo struct { + AuditInfo AuditInfo `json:"audit_info"` + IsAuditing bool `json:"is_auditing"` + AuditStatus int `json:"audit_status"` + LastUpdateTime int `json:"last_update_time"` + UnpassReason string `json:"unpass_reason"` + Details Details `json:"details"` +} +type Connects struct { + Platform string `json:"platform"` + ProfileImageURL string `json:"profile_image_url"` + ExpiredTime int `json:"expired_time"` + ExpiresIn int `json:"expires_in"` + PlatformScreenName string `json:"platform_screen_name"` + UserID int64 `json:"user_id"` + PlatformUID string `json:"platform_uid"` + SecPlatformUID string `json:"sec_platform_uid"` + PlatformAppID int `json:"platform_app_id"` + ModifyTime int `json:"modify_time"` + AccessToken string `json:"access_token"` + OpenID string `json:"open_id"` +} +type OperStaffRelationInfo struct { + HasPassword int `json:"has_password"` + Mobile string `json:"mobile"` + SecOperStaffUserID string `json:"sec_oper_staff_user_id"` + RelationMobileCountryCode int `json:"relation_mobile_country_code"` +} +type UserInfo struct { + AppID int `json:"app_id"` + AppUserInfo AppUserInfo `json:"app_user_info"` + AvatarURL string `json:"avatar_url"` + BgImgURL string `json:"bg_img_url"` + BuiAuditInfo BuiAuditInfo `json:"bui_audit_info"` + CanBeFoundByPhone int `json:"can_be_found_by_phone"` + Connects 
[]Connects `json:"connects"` + CountryCode int `json:"country_code"` + Description string `json:"description"` + DeviceID int `json:"device_id"` + Email string `json:"email"` + EmailCollected bool `json:"email_collected"` + Gender int `json:"gender"` + HasPassword int `json:"has_password"` + HmRegion int `json:"hm_region"` + IsBlocked int `json:"is_blocked"` + IsBlocking int `json:"is_blocking"` + IsRecommendAllowed int `json:"is_recommend_allowed"` + IsVisitorAccount bool `json:"is_visitor_account"` + Mobile string `json:"mobile"` + Name string `json:"name"` + NeedCheckBindStatus bool `json:"need_check_bind_status"` + OdinUserType int `json:"odin_user_type"` + OperStaffRelationInfo OperStaffRelationInfo `json:"oper_staff_relation_info"` + PhoneCollected bool `json:"phone_collected"` + RecommendHintMessage string `json:"recommend_hint_message"` + ScreenName string `json:"screen_name"` + SecUserID string `json:"sec_user_id"` + SessionKey string `json:"session_key"` + UseHmRegion bool `json:"use_hm_region"` + UserCreateTime int `json:"user_create_time"` + UserID int64 `json:"user_id"` + UserIDStr string `json:"user_id_str"` + UserVerified bool `json:"user_verified"` + VerifiedContent string `json:"verified_content"` +} + +// UploadToken 上传令牌配置 +type UploadToken struct { + Alice map[string]UploadAuthToken + Samantha MediaUploadAuthToken +} + +// UploadAuthToken 多种类型的上传配置:图片/文件 +type UploadAuthToken struct { + ServiceID string `json:"service_id"` + UploadPathPrefix string `json:"upload_path_prefix"` + Auth struct { + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + SessionToken string `json:"session_token"` + ExpiredTime time.Time `json:"expired_time"` + CurrentTime time.Time `json:"current_time"` + } `json:"auth"` + UploadHost string `json:"upload_host"` +} + +// MediaUploadAuthToken 媒体上传配置 +type MediaUploadAuthToken struct { + StsToken struct { + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + SessionToken string `json:"session_token"` + ExpiredTime time.Time `json:"expired_time"` + CurrentTime time.Time `json:"current_time"` + } `json:"sts_token"` + UploadInfo struct { + VideoHost string `json:"video_host"` + SpaceName string `json:"space_name"` + } `json:"upload_info"` +} + +type UploadAuthTokenResp struct { + BaseResp + Data UploadAuthToken `json:"data"` +} + +type MediaUploadAuthTokenResp struct { + BaseResp + Data MediaUploadAuthToken `json:"data"` +} + +type ResponseMetadata struct { + RequestID string `json:"RequestId"` + Action string `json:"Action"` + Version string `json:"Version"` + Service string `json:"Service"` + Region string `json:"Region"` + Error struct { + CodeN int `json:"CodeN,omitempty"` + Code string `json:"Code,omitempty"` + Message string `json:"Message,omitempty"` + } `json:"Error,omitempty"` +} + +type UploadConfig struct { + UploadAddress UploadAddress `json:"UploadAddress"` + FallbackUploadAddress FallbackUploadAddress `json:"FallbackUploadAddress"` + InnerUploadAddress InnerUploadAddress `json:"InnerUploadAddress"` + RequestID string `json:"RequestId"` + SDKParam interface{} `json:"SDKParam"` +} + +type UploadConfigResp struct { + ResponseMetadata `json:"ResponseMetadata"` + Result UploadConfig `json:"Result"` +} + +// StoreInfo 存储信息 +type StoreInfo struct { + StoreURI string `json:"StoreUri"` + Auth string `json:"Auth"` + UploadID string `json:"UploadID"` + UploadHeader map[string]interface{} `json:"UploadHeader,omitempty"` + StorageHeader map[string]interface{} 
`json:"StorageHeader,omitempty"` +} + +// UploadAddress 上传地址信息 +type UploadAddress struct { + StoreInfos []StoreInfo `json:"StoreInfos"` + UploadHosts []string `json:"UploadHosts"` + UploadHeader map[string]interface{} `json:"UploadHeader"` + SessionKey string `json:"SessionKey"` + Cloud string `json:"Cloud"` +} + +// FallbackUploadAddress 备用上传地址 +type FallbackUploadAddress struct { + StoreInfos []StoreInfo `json:"StoreInfos"` + UploadHosts []string `json:"UploadHosts"` + UploadHeader map[string]interface{} `json:"UploadHeader"` + SessionKey string `json:"SessionKey"` + Cloud string `json:"Cloud"` +} + +// UploadNode 上传节点信息 +type UploadNode struct { + Vid string `json:"Vid"` + Vids []string `json:"Vids"` + StoreInfos []StoreInfo `json:"StoreInfos"` + UploadHost string `json:"UploadHost"` + UploadHeader map[string]interface{} `json:"UploadHeader"` + Type string `json:"Type"` + Protocol string `json:"Protocol"` + SessionKey string `json:"SessionKey"` + NodeConfig struct { + UploadMode string `json:"UploadMode"` + } `json:"NodeConfig"` + Cluster string `json:"Cluster"` +} + +// AdvanceOption 高级选项 +type AdvanceOption struct { + Parallel int `json:"Parallel"` + Stream int `json:"Stream"` + SliceSize int `json:"SliceSize"` + EncryptionKey string `json:"EncryptionKey"` +} + +// InnerUploadAddress 内部上传地址 +type InnerUploadAddress struct { + UploadNodes []UploadNode `json:"UploadNodes"` + AdvanceOption AdvanceOption `json:"AdvanceOption"` +} + +// UploadPart 上传分片信息 +type UploadPart struct { + UploadId string `json:"uploadid,omitempty"` + PartNumber string `json:"part_number,omitempty"` + Crc32 string `json:"crc32,omitempty"` + Etag string `json:"etag,omitempty"` + Mode string `json:"mode,omitempty"` +} + +// UploadResp 上传响应体 +type UploadResp struct { + Code int `json:"code"` + ApiVersion string `json:"apiversion"` + Message string `json:"message"` + Data UploadPart `json:"data"` +} + +type VideoCommitUpload struct { + Vid string `json:"Vid"` + VideoMeta struct { + URI string `json:"Uri"` + Height int `json:"Height"` + Width int `json:"Width"` + OriginHeight int `json:"OriginHeight"` + OriginWidth int `json:"OriginWidth"` + Duration float64 `json:"Duration"` + Bitrate int `json:"Bitrate"` + Md5 string `json:"Md5"` + Format string `json:"Format"` + Size int `json:"Size"` + FileType string `json:"FileType"` + Codec string `json:"Codec"` + } `json:"VideoMeta"` + WorkflowInput struct { + TemplateID string `json:"TemplateId"` + } `json:"WorkflowInput"` + GetPosterMode string `json:"GetPosterMode"` +} + +type VideoCommitUploadResp struct { + ResponseMetadata ResponseMetadata `json:"ResponseMetadata"` + Result struct { + RequestID string `json:"RequestId"` + Results []VideoCommitUpload `json:"Results"` + } `json:"Result"` +} + +type CommonResp struct { + Code int `json:"code"` + Msg string `json:"msg,omitempty"` + Message string `json:"message,omitempty"` // 错误情况下的消息 + Data json.RawMessage `json:"data,omitempty"` // 原始数据,稍后解析 + Error *struct { + Code int `json:"code"` + Message string `json:"message"` + Locale string `json:"locale"` + } `json:"error,omitempty"` +} + +// IsSuccess 判断响应是否成功 +func (r *CommonResp) IsSuccess() bool { + return r.Code == 0 +} + +// GetError 获取错误信息 +func (r *CommonResp) GetError() error { + if r.IsSuccess() { + return nil + } + // 优先使用message字段 + errMsg := r.Message + if errMsg == "" { + errMsg = r.Msg + } + // 如果error对象存在且有详细消息,则使用error中的信息 + if r.Error != nil && r.Error.Message != "" { + errMsg = r.Error.Message + } + + return fmt.Errorf("[doubao] API error (code: %d): %s", 
r.Code, errMsg) +} + +// UnmarshalData 将data字段解析为指定类型 +func (r *CommonResp) UnmarshalData(v interface{}) error { + if !r.IsSuccess() { + return r.GetError() + } + + if len(r.Data) == 0 { + return nil + } + + return json.Unmarshal(r.Data, v) +} diff --git a/drivers/doubao/util.go b/drivers/doubao/util.go new file mode 100644 index 00000000..348c0aa0 --- /dev/null +++ b/drivers/doubao/util.go @@ -0,0 +1,970 @@ +package doubao + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/errgroup" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/avast/retry-go" + "github.com/go-resty/resty/v2" + "github.com/google/uuid" + log "github.com/sirupsen/logrus" + "hash/crc32" + "io" + "math" + "math/rand" + "net/http" + "net/url" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +const ( + DirectoryType = 1 + FileType = 2 + LinkType = 3 + ImageType = 4 + PagesType = 5 + VideoType = 6 + AudioType = 7 + MeetingMinutesType = 8 +) + +var FileNodeType = map[int]string{ + 1: "directory", + 2: "file", + 3: "link", + 4: "image", + 5: "pages", + 6: "video", + 7: "audio", + 8: "meeting_minutes", +} + +const ( + BaseURL = "https://www.doubao.com" + FileDataType = "file" + ImgDataType = "image" + VideoDataType = "video" + DefaultChunkSize = int64(5 * 1024 * 1024) // 5MB + MaxRetryAttempts = 3 // 最大重试次数 + UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36" + Region = "cn-north-1" + UploadTimeout = 3 * time.Minute +) + +// do others that not defined in Driver interface +func (d *Doubao) request(path string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { + reqUrl := BaseURL + path + req := base.RestyClient.R() + req.SetHeader("Cookie", d.Cookie) + if callback != nil { + callback(req) + } + + var commonResp CommonResp + + res, err := req.Execute(method, reqUrl) + log.Debugln(res.String()) + if err != nil { + return nil, err + } + + body := res.Body() + // 先解析为通用响应 + if err = json.Unmarshal(body, &commonResp); err != nil { + return nil, err + } + // 检查响应是否成功 + if !commonResp.IsSuccess() { + return body, commonResp.GetError() + } + + if resp != nil { + if err = json.Unmarshal(body, resp); err != nil { + return body, err + } + } + + return body, nil +} + +func (d *Doubao) getFiles(dirId, cursor string) (resp []File, err error) { + var r NodeInfoResp + + var body = base.Json{ + "node_id": dirId, + } + // 如果有游标,则设置游标和大小 + if cursor != "" { + body["cursor"] = cursor + body["size"] = 50 + } else { + body["need_full_path"] = false + } + + _, err = d.request("/samantha/aispace/node_info", http.MethodPost, func(req *resty.Request) { + req.SetBody(body) + }, &r) + if err != nil { + return nil, err + } + + if r.Data.Children != nil { + resp = r.Data.Children + } + + if r.Data.NextCursor != "-1" { + // 递归获取下一页 + nextFiles, err := d.getFiles(dirId, r.Data.NextCursor) + if err != nil { + return nil, err + } + + resp = append(r.Data.Children, nextFiles...) 
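	// The node_info endpoint marks the last page with next_cursor == "-1";
	// any other cursor is fed straight back into the recursive call above.
	// An equivalent iterative form would be (hypothetical sketch; fetchPage
	// stands in for a single node_info request):
	//
	//	var all []File
	//	for cursor := ""; ; {
	//		page := fetchPage(dirId, cursor)
	//		all = append(all, page.Children...)
	//		if page.NextCursor == "-1" {
	//			break
	//		}
	//		cursor = page.NextCursor
	//	}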
+ } + + return resp, err +} + +func (d *Doubao) getUserInfo() (UserInfo, error) { + var r UserInfoResp + + _, err := d.request("/passport/account/info/v2/", http.MethodGet, nil, &r) + if err != nil { + return UserInfo{}, err + } + + return r.Data, err +} + +// 签名请求 +func (d *Doubao) signRequest(req *resty.Request, method, tokenType, uploadUrl string) error { + parsedUrl, err := url.Parse(uploadUrl) + if err != nil { + return fmt.Errorf("invalid URL format: %w", err) + } + + var accessKeyId, secretAccessKey, sessionToken string + var serviceName string + + if tokenType == VideoDataType { + accessKeyId = d.UploadToken.Samantha.StsToken.AccessKeyID + secretAccessKey = d.UploadToken.Samantha.StsToken.SecretAccessKey + sessionToken = d.UploadToken.Samantha.StsToken.SessionToken + serviceName = "vod" + } else { + accessKeyId = d.UploadToken.Alice[tokenType].Auth.AccessKeyID + secretAccessKey = d.UploadToken.Alice[tokenType].Auth.SecretAccessKey + sessionToken = d.UploadToken.Alice[tokenType].Auth.SessionToken + serviceName = "imagex" + } + + // 当前时间,格式为 ISO8601 + now := time.Now().UTC() + amzDate := now.Format("20060102T150405Z") + dateStamp := now.Format("20060102") + + req.SetHeader("X-Amz-Date", amzDate) + + if sessionToken != "" { + req.SetHeader("X-Amz-Security-Token", sessionToken) + } + + // 计算请求体的SHA256哈希 + var bodyHash string + if req.Body != nil { + bodyBytes, ok := req.Body.([]byte) + if !ok { + return fmt.Errorf("request body must be []byte") + } + + bodyHash = hashSHA256(string(bodyBytes)) + req.SetHeader("X-Amz-Content-Sha256", bodyHash) + } else { + bodyHash = hashSHA256("") + } + + // 创建规范请求 + canonicalURI := parsedUrl.Path + if canonicalURI == "" { + canonicalURI = "/" + } + + // 查询参数按照字母顺序排序 + canonicalQueryString := getCanonicalQueryString(req.QueryParam) + // 规范请求头 + canonicalHeaders, signedHeaders := getCanonicalHeadersFromMap(req.Header) + canonicalRequest := method + "\n" + + canonicalURI + "\n" + + canonicalQueryString + "\n" + + canonicalHeaders + "\n" + + signedHeaders + "\n" + + bodyHash + + algorithm := "AWS4-HMAC-SHA256" + credentialScope := fmt.Sprintf("%s/%s/%s/aws4_request", dateStamp, Region, serviceName) + + stringToSign := algorithm + "\n" + + amzDate + "\n" + + credentialScope + "\n" + + hashSHA256(canonicalRequest) + // 计算签名密钥 + signingKey := getSigningKey(secretAccessKey, dateStamp, Region, serviceName) + // 计算签名 + signature := hmacSHA256Hex(signingKey, stringToSign) + // 构建授权头 + authorizationHeader := fmt.Sprintf( + "%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", + algorithm, + accessKeyId, + credentialScope, + signedHeaders, + signature, + ) + + req.SetHeader("Authorization", authorizationHeader) + + return nil +} + +func (d *Doubao) requestApi(url, method, tokenType string, callback base.ReqCallback, resp interface{}) ([]byte, error) { + req := base.RestyClient.R() + req.SetHeaders(map[string]string{ + "user-agent": UserAgent, + }) + + if method == http.MethodPost { + req.SetHeader("Content-Type", "text/plain;charset=UTF-8") + } + + if callback != nil { + callback(req) + } + + if resp != nil { + req.SetResult(resp) + } + + // 使用自定义AWS SigV4签名 + err := d.signRequest(req, method, tokenType, url) + if err != nil { + return nil, err + } + + res, err := req.Execute(method, url) + if err != nil { + return nil, err + } + + return res.Body(), nil +} + +func (d *Doubao) initUploadToken() (*UploadToken, error) { + uploadToken := &UploadToken{ + Alice: make(map[string]UploadAuthToken), + Samantha: MediaUploadAuthToken{}, + } + + fileAuthToken, err := 
d.getUploadAuthToken(FileDataType) + if err != nil { + return nil, err + } + + imgAuthToken, err := d.getUploadAuthToken(ImgDataType) + if err != nil { + return nil, err + } + + mediaAuthToken, err := d.getSamantaUploadAuthToken() + if err != nil { + return nil, err + } + + uploadToken.Alice[FileDataType] = fileAuthToken + uploadToken.Alice[ImgDataType] = imgAuthToken + uploadToken.Samantha = mediaAuthToken + + return uploadToken, nil +} + +func (d *Doubao) getUploadAuthToken(dataType string) (ut UploadAuthToken, err error) { + var r UploadAuthTokenResp + _, err = d.request("/alice/upload/auth_token", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "scene": "bot_chat", + "data_type": dataType, + }) + }, &r) + + return r.Data, err +} + +func (d *Doubao) getSamantaUploadAuthToken() (mt MediaUploadAuthToken, err error) { + var r MediaUploadAuthTokenResp + _, err = d.request("/samantha/media/get_upload_token", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{}) + }, &r) + + return r.Data, err +} + +// getUploadConfig 获取上传配置信息 +func (d *Doubao) getUploadConfig(upConfig *UploadConfig, dataType string, file model.FileStreamer) error { + tokenType := dataType + // 配置参数函数 + configureParams := func() (string, map[string]string) { + var uploadUrl string + var params map[string]string + // 根据数据类型设置不同的上传参数 + switch dataType { + case VideoDataType: + // 音频/视频类型 - 使用uploadToken.Samantha的配置 + uploadUrl = d.UploadToken.Samantha.UploadInfo.VideoHost + params = map[string]string{ + "Action": "ApplyUploadInner", + "Version": "2020-11-19", + "SpaceName": d.UploadToken.Samantha.UploadInfo.SpaceName, + "FileType": "video", + "IsInner": "1", + "NeedFallback": "true", + "FileSize": strconv.FormatInt(file.GetSize(), 10), + "s": randomString(), + } + case ImgDataType, FileDataType: + // 图片或其他文件类型 - 使用uploadToken.Alice对应配置 + uploadUrl = "https://" + d.UploadToken.Alice[dataType].UploadHost + params = map[string]string{ + "Action": "ApplyImageUpload", + "Version": "2018-08-01", + "ServiceId": d.UploadToken.Alice[dataType].ServiceID, + "NeedFallback": "true", + "FileSize": strconv.FormatInt(file.GetSize(), 10), + "FileExtension": filepath.Ext(file.GetName()), + "s": randomString(), + } + } + return uploadUrl, params + } + + // 获取初始参数 + uploadUrl, params := configureParams() + + tokenRefreshed := false + var configResp UploadConfigResp + + err := d._retryOperation("get upload_config", func() error { + configResp = UploadConfigResp{} + + _, err := d.requestApi(uploadUrl, http.MethodGet, tokenType, func(req *resty.Request) { + req.SetQueryParams(params) + }, &configResp) + if err != nil { + return err + } + + if configResp.ResponseMetadata.Error.Code == "" { + *upConfig = configResp.Result + return nil + } + + // 100028 凭证过期 + if configResp.ResponseMetadata.Error.CodeN == 100028 && !tokenRefreshed { + log.Debugln("[doubao] Upload token expired, re-fetching...") + newToken, err := d.initUploadToken() + if err != nil { + return fmt.Errorf("failed to refresh token: %w", err) + } + + d.UploadToken = newToken + tokenRefreshed = true + uploadUrl, params = configureParams() + + return retry.Error{errors.New("token refreshed, retry needed")} + } + + return fmt.Errorf("get upload_config failed: %s", configResp.ResponseMetadata.Error.Message) + }) + + return err +} + +// uploadNode 上传 文件信息 +func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file model.FileStreamer, dataType string) (UploadNodeResp, error) { + reqUuid := uuid.New().String() + var key string + var nodeType 
int + + mimetype := file.GetMimetype() + switch dataType { + case VideoDataType: + key = uploadConfig.InnerUploadAddress.UploadNodes[0].Vid + if strings.HasPrefix(mimetype, "audio/") { + nodeType = AudioType // 音频类型 + } else { + nodeType = VideoType // 视频类型 + } + case ImgDataType: + key = uploadConfig.InnerUploadAddress.UploadNodes[0].StoreInfos[0].StoreURI + nodeType = ImageType // 图片类型 + default: // FileDataType + key = uploadConfig.InnerUploadAddress.UploadNodes[0].StoreInfos[0].StoreURI + nodeType = FileType // 文件类型 + } + + var r UploadNodeResp + _, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "node_list": []base.Json{ + { + "local_id": reqUuid, + "parent_id": dir.GetID(), + "name": file.GetName(), + "key": key, + "node_content": base.Json{}, + "node_type": nodeType, + "size": file.GetSize(), + }, + }, + "request_id": reqUuid, + }) + }, &r) + + return r, err +} + +// Upload 普通上传实现 +func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) { + data, err := io.ReadAll(file) + if err != nil { + return nil, err + } + + // 计算CRC32 + crc32Hash := crc32.NewIEEE() + crc32Hash.Write(data) + crc32Value := hex.EncodeToString(crc32Hash.Sum(nil)) + + // 构建请求路径 + uploadNode := config.InnerUploadAddress.UploadNodes[0] + storeInfo := uploadNode.StoreInfos[0] + uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI) + + uploadResp := UploadResp{} + + if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) { + req.SetHeaders(map[string]string{ + "Content-Type": "application/octet-stream", + "Content-Crc32": crc32Value, + "Content-Length": fmt.Sprintf("%d", len(data)), + "Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)), + }) + + req.SetBody(data) + }, &uploadResp); err != nil { + return nil, err + } + + if uploadResp.Code != 2000 { + return nil, fmt.Errorf("upload failed: %s", uploadResp.Message) + } + + uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType) + if err != nil { + return nil, err + } + + return &model.Object{ + ID: uploadNodeResp.Data.NodeList[0].ID, + Name: uploadNodeResp.Data.NodeList[0].Name, + Size: file.GetSize(), + IsFolder: false, + }, nil +} + +// UploadByMultipart 分片上传 +func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fileSize int64, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) { + // 构建请求路径 + uploadNode := config.InnerUploadAddress.UploadNodes[0] + storeInfo := uploadNode.StoreInfos[0] + uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI) + // 初始化分片上传 + var uploadID string + err := d._retryOperation("Initialize multipart upload", func() error { + var err error + uploadID, err = d.initMultipartUpload(config, uploadUrl, storeInfo) + return err + }) + if err != nil { + return nil, fmt.Errorf("failed to initialize multipart upload: %w", err) + } + // 准备分片参数 + chunkSize := DefaultChunkSize + if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 { + chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize) + } + totalParts := (fileSize + chunkSize - 1) / chunkSize + // 创建分片信息组 + parts := make([]UploadPart, totalParts) + // 缓存文件 + tempFile, err := file.CacheFullInTempFile() + if err != nil { + return nil, fmt.Errorf("failed to cache file: %w", 
err) + } + defer tempFile.Close() + up(10.0) // 更新进度 + // 设置并行上传 + threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread, + retry.Attempts(1), + retry.Delay(time.Second), + retry.DelayType(retry.BackOffDelay)) + + var partsMutex sync.Mutex + // 并行上传所有分片 + for partIndex := int64(0); partIndex < totalParts; partIndex++ { + if utils.IsCanceled(uploadCtx) { + break + } + partIndex := partIndex + partNumber := partIndex + 1 // 分片编号从1开始 + + threadG.Go(func(ctx context.Context) error { + // 计算此分片的大小和偏移 + offset := partIndex * chunkSize + size := chunkSize + if partIndex == totalParts-1 { + size = fileSize - offset + } + + limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size)) + // 读取数据到内存 + data, err := io.ReadAll(limitedReader) + if err != nil { + return fmt.Errorf("failed to read part %d: %w", partNumber, err) + } + // 计算CRC32 + crc32Value := calculateCRC32(data) + // 使用_retryOperation上传分片 + var uploadPart UploadPart + if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error { + var err error + uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value) + return err + }); err != nil { + return fmt.Errorf("part %d upload failed: %w", partNumber, err) + } + // 记录成功上传的分片 + partsMutex.Lock() + parts[partIndex] = UploadPart{ + PartNumber: strconv.FormatInt(partNumber, 10), + Etag: uploadPart.Etag, + Crc32: crc32Value, + } + partsMutex.Unlock() + // 更新进度 + progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts) + up(math.Min(progress, 95.0)) + + return nil + }) + } + + if err = threadG.Wait(); err != nil { + return nil, err + } + // 完成上传-分片合并 + if err = d._retryOperation("Complete multipart upload", func() error { + return d.completeMultipartUpload(config, uploadUrl, uploadID, parts) + }); err != nil { + return nil, fmt.Errorf("failed to complete multipart upload: %w", err) + } + // 提交上传 + if err = d._retryOperation("Commit upload", func() error { + return d.commitMultipartUpload(config) + }); err != nil { + return nil, fmt.Errorf("failed to commit upload: %w", err) + } + + up(98.0) // 更新到98% + // 上传节点信息 + var uploadNodeResp UploadNodeResp + + if err = d._retryOperation("Upload node", func() error { + var err error + uploadNodeResp, err = d.uploadNode(config, dstDir, file, dataType) + return err + }); err != nil { + return nil, fmt.Errorf("failed to upload node: %w", err) + } + + up(100.0) // 完成上传 + + return &model.Object{ + ID: uploadNodeResp.Data.NodeList[0].ID, + Name: uploadNodeResp.Data.NodeList[0].Name, + Size: file.GetSize(), + IsFolder: false, + }, nil +} + +// 统一上传请求方法 +func (d *Doubao) uploadRequest(uploadUrl string, method string, storeInfo StoreInfo, callback base.ReqCallback, resp interface{}) ([]byte, error) { + client := resty.New() + client.SetTransport(&http.Transport{ + DisableKeepAlives: true, // 禁用连接复用 + ForceAttemptHTTP2: false, // 强制使用HTTP/1.1 + }) + client.SetTimeout(UploadTimeout) + + req := client.R() + req.SetHeaders(map[string]string{ + "Host": strings.Split(uploadUrl, "/")[2], + "Referer": BaseURL + "/", + "Origin": BaseURL, + "User-Agent": UserAgent, + "X-Storage-U": d.UserId, + "Authorization": storeInfo.Auth, + }) + + if method == http.MethodPost { + req.SetHeader("Content-Type", "text/plain;charset=UTF-8") + } + + if callback != nil { + callback(req) + } + + if resp != nil { + req.SetResult(resp) + } + + res, err := req.Execute(method, uploadUrl) + if err != nil && err != io.EOF { + return nil, fmt.Errorf("upload request failed: %w", err) 
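	// io.EOF is tolerated above: with keep-alives disabled and HTTP/2 off
	// (see the transport configured at the top of this function), the
	// storage host may close the connection right after the last byte,
	// which can surface as EOF even though the response was fully received.
	// This is an inference from the transport settings, not a documented
	// contract of the upload hosts.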
+ } + + return res.Body(), nil +} + +// 初始化分片上传 +func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, storeInfo StoreInfo) (uploadId string, err error) { + uploadResp := UploadResp{} + + _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) { + req.SetQueryParams(map[string]string{ + "uploadmode": "part", + "phase": "init", + }) + }, &uploadResp) + + if err != nil { + return uploadId, err + } + + if uploadResp.Code != 2000 { + return uploadId, fmt.Errorf("init upload failed: %s", uploadResp.Message) + } + + return uploadResp.Data.UploadId, nil +} + +// 分片上传实现 +func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) { + uploadResp := UploadResp{} + storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0] + + _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) { + req.SetHeaders(map[string]string{ + "Content-Type": "application/octet-stream", + "Content-Crc32": crc32Value, + "Content-Length": fmt.Sprintf("%d", len(data)), + "Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)), + }) + + req.SetQueryParams(map[string]string{ + "uploadid": uploadID, + "part_number": strconv.FormatInt(partNumber, 10), + "phase": "transfer", + }) + + req.SetBody(data) + req.SetContentLength(true) + }, &uploadResp) + + if err != nil { + return resp, err + } + + if uploadResp.Code != 2000 { + return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message) + } else if uploadResp.Data.Crc32 != crc32Value { + return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32) + } + + return uploadResp.Data, nil +} + +// 完成分片上传 +func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error { + uploadResp := UploadResp{} + + storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0] + + body := _convertUploadParts(parts) + + err := utils.Retry(MaxRetryAttempts, time.Second, func() (err error) { + _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) { + req.SetQueryParams(map[string]string{ + "uploadid": uploadID, + "phase": "finish", + "uploadmode": "part", + }) + req.SetBody(body) + }, &uploadResp) + + if err != nil { + return err + } + // 检查响应状态码 2000 成功 4024 分片合并中 + if uploadResp.Code != 2000 && uploadResp.Code != 4024 { + return fmt.Errorf("finish upload failed: %s", uploadResp.Message) + } + + return err + }) + + if err != nil { + return fmt.Errorf("failed to complete multipart upload: %w", err) + } + + return nil +} + +func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error { + uploadUrl := d.UploadToken.Samantha.UploadInfo.VideoHost + params := map[string]string{ + "Action": "CommitUploadInner", + "Version": "2020-11-19", + "SpaceName": d.UploadToken.Samantha.UploadInfo.SpaceName, + } + tokenType := VideoDataType + + videoCommitUploadResp := VideoCommitUploadResp{} + + jsonBytes, err := json.Marshal(base.Json{ + "SessionKey": uploadConfig.InnerUploadAddress.UploadNodes[0].SessionKey, + "Functions": []base.Json{}, + }) + if err != nil { + return fmt.Errorf("failed to marshal request data: %w", err) + } + + _, err = d.requestApi(uploadUrl, http.MethodPost, tokenType, func(req *resty.Request) { + req.SetHeader("Content-Type", "application/json") + req.SetQueryParams(params) + 
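		// The body is passed as the pre-marshalled []byte on purpose:
		// signRequest hashes req.Body for X-Amz-Content-Sha256 and rejects
		// anything that is not []byte, so handing resty a map or struct here
		// would fail signing, or sign different bytes than the ones sent.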
req.SetBody(jsonBytes) + + }, &videoCommitUploadResp) + if err != nil { + return err + } + + return nil +} + +// 计算CRC32 +func calculateCRC32(data []byte) string { + hash := crc32.NewIEEE() + hash.Write(data) + return hex.EncodeToString(hash.Sum(nil)) +} + +// _retryOperation 操作重试 +func (d *Doubao) _retryOperation(operation string, fn func() error) error { + return retry.Do( + fn, + retry.Attempts(MaxRetryAttempts), + retry.Delay(500*time.Millisecond), + retry.DelayType(retry.BackOffDelay), + retry.MaxJitter(200*time.Millisecond), + retry.OnRetry(func(n uint, err error) { + log.Debugf("[doubao] %s retry #%d: %v", operation, n+1, err) + }), + ) +} + +// _convertUploadParts 将分片信息转换为字符串 +func _convertUploadParts(parts []UploadPart) string { + if len(parts) == 0 { + return "" + } + + var result strings.Builder + + for i, part := range parts { + if i > 0 { + result.WriteString(",") + } + result.WriteString(fmt.Sprintf("%s:%s", part.PartNumber, part.Crc32)) + } + + return result.String() +} + +// 获取规范查询字符串 +func getCanonicalQueryString(query url.Values) string { + if len(query) == 0 { + return "" + } + + keys := make([]string, 0, len(query)) + for k := range query { + keys = append(keys, k) + } + sort.Strings(keys) + + parts := make([]string, 0, len(keys)) + for _, k := range keys { + values := query[k] + for _, v := range values { + parts = append(parts, urlEncode(k)+"="+urlEncode(v)) + } + } + + return strings.Join(parts, "&") +} + +func urlEncode(s string) string { + s = url.QueryEscape(s) + s = strings.ReplaceAll(s, "+", "%20") + return s +} + +// 获取规范头信息和已签名头列表 +func getCanonicalHeadersFromMap(headers map[string][]string) (string, string) { + // 不可签名的头部列表 + unsignableHeaders := map[string]bool{ + "authorization": true, + "content-type": true, + "content-length": true, + "user-agent": true, + "presigned-expires": true, + "expect": true, + "x-amzn-trace-id": true, + } + headerValues := make(map[string]string) + var signedHeadersList []string + + for k, v := range headers { + if len(v) == 0 { + continue + } + + lowerKey := strings.ToLower(k) + // 检查是否可签名 + if strings.HasPrefix(lowerKey, "x-amz-") || !unsignableHeaders[lowerKey] { + value := strings.TrimSpace(v[0]) + value = strings.Join(strings.Fields(value), " ") + headerValues[lowerKey] = value + signedHeadersList = append(signedHeadersList, lowerKey) + } + } + + sort.Strings(signedHeadersList) + + var canonicalHeadersStr strings.Builder + for _, key := range signedHeadersList { + canonicalHeadersStr.WriteString(key) + canonicalHeadersStr.WriteString(":") + canonicalHeadersStr.WriteString(headerValues[key]) + canonicalHeadersStr.WriteString("\n") + } + + signedHeaders := strings.Join(signedHeadersList, ";") + + return canonicalHeadersStr.String(), signedHeaders +} + +// 计算HMAC-SHA256 +func hmacSHA256(key []byte, data string) []byte { + h := hmac.New(sha256.New, key) + h.Write([]byte(data)) + return h.Sum(nil) +} + +// 计算HMAC-SHA256并返回十六进制字符串 +func hmacSHA256Hex(key []byte, data string) string { + return hex.EncodeToString(hmacSHA256(key, data)) +} + +// 计算SHA256哈希并返回十六进制字符串 +func hashSHA256(data string) string { + h := sha256.New() + h.Write([]byte(data)) + return hex.EncodeToString(h.Sum(nil)) +} + +// 获取签名密钥 +func getSigningKey(secretKey, dateStamp, region, service string) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), dateStamp) + kRegion := hmacSHA256(kDate, region) + kService := hmacSHA256(kRegion, service) + kSigning := hmacSHA256(kService, "aws4_request") + return kSigning +} + +// generateContentDisposition 生成符合RFC 
5987标准的Content-Disposition头部 +func generateContentDisposition(filename string) string { + // 按照RFC 2047进行编码,用于filename部分 + encodedName := urlEncode(filename) + + // 按照RFC 5987进行编码,用于filename*部分 + encodedNameRFC5987 := encodeRFC5987(filename) + + return fmt.Sprintf("attachment; filename=\"%s\"; filename*=utf-8''%s", + encodedName, encodedNameRFC5987) +} + +// encodeRFC5987 按照RFC 5987规范编码字符串,适用于HTTP头部参数中的非ASCII字符 +func encodeRFC5987(s string) string { + var buf strings.Builder + for _, r := range []byte(s) { + // 根据RFC 5987,只有字母、数字和部分特殊符号可以不编码 + if (r >= 'a' && r <= 'z') || + (r >= 'A' && r <= 'Z') || + (r >= '0' && r <= '9') || + r == '-' || r == '.' || r == '_' || r == '~' { + buf.WriteByte(r) + } else { + // 其他字符都需要百分号编码 + fmt.Fprintf(&buf, "%%%02X", r) + } + } + return buf.String() +} + +func randomString() string { + const charset = "0123456789abcdefghijklmnopqrstuvwxyz" + const length = 11 // 11位随机字符串 + + var sb strings.Builder + sb.Grow(length) + + for i := 0; i < length; i++ { + sb.WriteByte(charset[rand.Intn(len(charset))]) + } + + return sb.String() +} diff --git a/drivers/doubao_share/driver.go b/drivers/doubao_share/driver.go new file mode 100644 index 00000000..61076d1e --- /dev/null +++ b/drivers/doubao_share/driver.go @@ -0,0 +1,177 @@ +package doubao_share + +import ( + "context" + "errors" + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/go-resty/resty/v2" + "net/http" +) + +type DoubaoShare struct { + model.Storage + Addition + RootFiles []RootFileList +} + +func (d *DoubaoShare) Config() driver.Config { + return config +} + +func (d *DoubaoShare) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *DoubaoShare) Init(ctx context.Context) error { + // 初始化 虚拟分享列表 + if err := d.initShareList(); err != nil { + return err + } + + return nil +} + +func (d *DoubaoShare) Drop(ctx context.Context) error { + return nil +} + +func (d *DoubaoShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + // 检查是否为根目录 + if dir.GetID() == "" && dir.GetPath() == "/" { + return d.listRootDirectory(ctx) + } + + // 非根目录,处理不同情况 + if fo, ok := dir.(*FileObject); ok { + if fo.ShareID == "" { + // 虚拟目录,需要列出子目录 + return d.listVirtualDirectoryContent(dir) + } else { + // 具有分享ID的目录,获取此分享下的文件 + shareId, relativePath, err := d._findShareAndPath(dir) + if err != nil { + return nil, err + } + return d.getFilesInPath(ctx, shareId, dir.GetID(), relativePath) + } + } + + // 使用通用方法 + shareId, relativePath, err := d._findShareAndPath(dir) + if err != nil { + return nil, err + } + + // 获取指定路径下的文件 + return d.getFilesInPath(ctx, shareId, dir.GetID(), relativePath) +} + +func (d *DoubaoShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + var downloadUrl string + + if u, ok := file.(*FileObject); ok { + switch u.NodeType { + case VideoType, AudioType: + var r GetVideoFileUrlResp + _, err := d.request("/samantha/media/get_play_info", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "key": u.Key, + "share_id": u.ShareID, + "node_id": file.GetID(), + }) + }, &r) + if err != nil { + return nil, err + } + + downloadUrl = r.Data.OriginalMediaInfo.MainURL + default: + var r GetFileUrlResp + _, err := d.request("/alice/message/get_file_url", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "uris": []string{u.Key}, + "type": 
FileNodeType[u.NodeType], + }) + }, &r) + if err != nil { + return nil, err + } + + downloadUrl = r.Data.FileUrls[0].MainURL + } + + // 生成标准的Content-Disposition + contentDisposition := generateContentDisposition(u.Name) + + return &model.Link{ + URL: downloadUrl, + Header: http.Header{ + "User-Agent": []string{UserAgent}, + "Content-Disposition": []string{contentDisposition}, + }, + }, nil + } + + return nil, errors.New("can't convert obj to URL") +} + +func (d *DoubaoShare) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + // TODO create folder, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + // TODO move obj, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + // TODO rename obj, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + // TODO copy obj, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) Remove(ctx context.Context, obj model.Obj) error { + // TODO remove obj, optional + return errs.NotImplement +} + +func (d *DoubaoShare) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + // TODO upload file, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { + // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *DoubaoShare) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) { + // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional + // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir + // return errs.NotImplement to use an internal archive tool + return nil, errs.NotImplement +} + +//func (d *DoubaoShare) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { +// return nil, errs.NotSupport +//} + +var _ driver.Driver = (*DoubaoShare)(nil) diff --git a/drivers/doubao_share/meta.go b/drivers/doubao_share/meta.go new file mode 100644 index 00000000..a749eefb --- /dev/null +++ b/drivers/doubao_share/meta.go @@ -0,0 +1,32 @@ +package doubao_share + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + driver.RootPath + Cookie string `json:"cookie" type:"text"` + ShareIds string `json:"share_ids" type:"text" required:"true"` +} + +var config = driver.Config{ + Name: "DoubaoShare", + LocalSort: true, + OnlyLocal: false, + OnlyProxy: false, + NoCache: 
false, + NoUpload: true, + NeedMs: false, + DefaultRoot: "/", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &DoubaoShare{} + }) +} diff --git a/drivers/doubao_share/types.go b/drivers/doubao_share/types.go new file mode 100644 index 00000000..46f226fa --- /dev/null +++ b/drivers/doubao_share/types.go @@ -0,0 +1,207 @@ +package doubao_share + +import ( + "encoding/json" + "fmt" + "github.com/alist-org/alist/v3/internal/model" +) + +type BaseResp struct { + Code int `json:"code"` + Msg string `json:"msg"` +} + +type NodeInfoData struct { + Share ShareInfo `json:"share,omitempty"` + Creator CreatorInfo `json:"creator,omitempty"` + NodeList []File `json:"node_list,omitempty"` + NodeInfo File `json:"node_info,omitempty"` + Children []File `json:"children,omitempty"` + Path FilePath `json:"path,omitempty"` + NextCursor string `json:"next_cursor,omitempty"` + HasMore bool `json:"has_more,omitempty"` +} + +type NodeInfoResp struct { + BaseResp + NodeInfoData `json:"data"` +} + +type RootFileList struct { + ShareID string + VirtualPath string + NodeInfo NodeInfoData + Child *[]RootFileList +} + +type File struct { + ID string `json:"id"` + Name string `json:"name"` + Key string `json:"key"` + NodeType int `json:"node_type"` + Size int64 `json:"size"` + Source int `json:"source"` + NameReviewStatus int `json:"name_review_status"` + ContentReviewStatus int `json:"content_review_status"` + RiskReviewStatus int `json:"risk_review_status"` + ConversationID string `json:"conversation_id"` + ParentID string `json:"parent_id"` + CreateTime int64 `json:"create_time"` + UpdateTime int64 `json:"update_time"` +} + +type FileObject struct { + model.Object + ShareID string + Key string + NodeID string + NodeType int +} + +type ShareInfo struct { + ShareID string `json:"share_id"` + FirstNode struct { + ID string `json:"id"` + Name string `json:"name"` + Key string `json:"key"` + NodeType int `json:"node_type"` + Size int `json:"size"` + Source int `json:"source"` + Content struct { + LinkFileType string `json:"link_file_type"` + ImageWidth int `json:"image_width"` + ImageHeight int `json:"image_height"` + AiSkillStatus int `json:"ai_skill_status"` + } `json:"content"` + NameReviewStatus int `json:"name_review_status"` + ContentReviewStatus int `json:"content_review_status"` + RiskReviewStatus int `json:"risk_review_status"` + ConversationID string `json:"conversation_id"` + ParentID string `json:"parent_id"` + CreateTime int `json:"create_time"` + UpdateTime int `json:"update_time"` + } `json:"first_node"` + NodeCount int `json:"node_count"` + CreateTime int `json:"create_time"` + Channel string `json:"channel"` + InfluencerType int `json:"influencer_type"` +} + +type CreatorInfo struct { + EntityID string `json:"entity_id"` + UserName string `json:"user_name"` + NickName string `json:"nick_name"` + Avatar struct { + OriginURL string `json:"origin_url"` + TinyURL string `json:"tiny_url"` + URI string `json:"uri"` + } `json:"avatar"` +} + +type FilePath []struct { + ID string `json:"id"` + Name string `json:"name"` + Key string `json:"key"` + NodeType int `json:"node_type"` + Size int `json:"size"` + Source int `json:"source"` + NameReviewStatus int `json:"name_review_status"` + ContentReviewStatus int `json:"content_review_status"` + RiskReviewStatus int `json:"risk_review_status"` + ConversationID string `json:"conversation_id"` + ParentID string `json:"parent_id"` + CreateTime int `json:"create_time"` + UpdateTime int 
`json:"update_time"` +} + +type GetFileUrlResp struct { + BaseResp + Data struct { + FileUrls []struct { + URI string `json:"uri"` + MainURL string `json:"main_url"` + BackURL string `json:"back_url"` + } `json:"file_urls"` + } `json:"data"` +} + +type GetVideoFileUrlResp struct { + BaseResp + Data struct { + MediaType string `json:"media_type"` + MediaInfo []struct { + Meta struct { + Height string `json:"height"` + Width string `json:"width"` + Format string `json:"format"` + Duration float64 `json:"duration"` + CodecType string `json:"codec_type"` + Definition string `json:"definition"` + } `json:"meta"` + MainURL string `json:"main_url"` + BackupURL string `json:"backup_url"` + } `json:"media_info"` + OriginalMediaInfo struct { + Meta struct { + Height string `json:"height"` + Width string `json:"width"` + Format string `json:"format"` + Duration float64 `json:"duration"` + CodecType string `json:"codec_type"` + Definition string `json:"definition"` + } `json:"meta"` + MainURL string `json:"main_url"` + BackupURL string `json:"backup_url"` + } `json:"original_media_info"` + PosterURL string `json:"poster_url"` + PlayableStatus int `json:"playable_status"` + } `json:"data"` +} + +type CommonResp struct { + Code int `json:"code"` + Msg string `json:"msg,omitempty"` + Message string `json:"message,omitempty"` // 错误情况下的消息 + Data json.RawMessage `json:"data,omitempty"` // 原始数据,稍后解析 + Error *struct { + Code int `json:"code"` + Message string `json:"message"` + Locale string `json:"locale"` + } `json:"error,omitempty"` +} + +// IsSuccess 判断响应是否成功 +func (r *CommonResp) IsSuccess() bool { + return r.Code == 0 +} + +// GetError 获取错误信息 +func (r *CommonResp) GetError() error { + if r.IsSuccess() { + return nil + } + // 优先使用message字段 + errMsg := r.Message + if errMsg == "" { + errMsg = r.Msg + } + // 如果error对象存在且有详细消息,则使用error中的信息 + if r.Error != nil && r.Error.Message != "" { + errMsg = r.Error.Message + } + + return fmt.Errorf("[doubao] API error (code: %d): %s", r.Code, errMsg) +} + +// UnmarshalData 将data字段解析为指定类型 +func (r *CommonResp) UnmarshalData(v interface{}) error { + if !r.IsSuccess() { + return r.GetError() + } + + if len(r.Data) == 0 { + return nil + } + + return json.Unmarshal(r.Data, v) +} diff --git a/drivers/doubao_share/util.go b/drivers/doubao_share/util.go new file mode 100644 index 00000000..e0fc526e --- /dev/null +++ b/drivers/doubao_share/util.go @@ -0,0 +1,744 @@ +package doubao_share + +import ( + "context" + "encoding/json" + "fmt" + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/model" + "github.com/go-resty/resty/v2" + log "github.com/sirupsen/logrus" + "net/http" + "net/url" + "path" + "regexp" + "strings" + "time" +) + +const ( + DirectoryType = 1 + FileType = 2 + LinkType = 3 + ImageType = 4 + PagesType = 5 + VideoType = 6 + AudioType = 7 + MeetingMinutesType = 8 +) + +var FileNodeType = map[int]string{ + 1: "directory", + 2: "file", + 3: "link", + 4: "image", + 5: "pages", + 6: "video", + 7: "audio", + 8: "meeting_minutes", +} + +const ( + BaseURL = "https://www.doubao.com" + FileDataType = "file" + ImgDataType = "image" + VideoDataType = "video" + UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36" +) + +func (d *DoubaoShare) request(path string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { + reqUrl := BaseURL + path + req := base.RestyClient.R() + + req.SetHeaders(map[string]string{ + "Cookie": d.Cookie, + "User-Agent": 
diff --git a/drivers/doubao_share/util.go b/drivers/doubao_share/util.go
new file mode 100644
index 00000000..e0fc526e
--- /dev/null
+++ b/drivers/doubao_share/util.go
@@ -0,0 +1,744 @@
+package doubao_share
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "net/url"
+    "path"
+    "regexp"
+    "strings"
+    "time"
+
+    "github.com/alist-org/alist/v3/drivers/base"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/go-resty/resty/v2"
+    log "github.com/sirupsen/logrus"
+)
+
+const (
+    DirectoryType      = 1
+    FileType           = 2
+    LinkType           = 3
+    ImageType          = 4
+    PagesType          = 5
+    VideoType          = 6
+    AudioType          = 7
+    MeetingMinutesType = 8
+)
+
+var FileNodeType = map[int]string{
+    1: "directory",
+    2: "file",
+    3: "link",
+    4: "image",
+    5: "pages",
+    6: "video",
+    7: "audio",
+    8: "meeting_minutes",
+}
+
+const (
+    BaseURL       = "https://www.doubao.com"
+    FileDataType  = "file"
+    ImgDataType   = "image"
+    VideoDataType = "video"
+    UserAgent     = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
+)
+
+func (d *DoubaoShare) request(path string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+    reqUrl := BaseURL + path
+    req := base.RestyClient.R()
+
+    req.SetHeaders(map[string]string{
+        "Cookie":     d.Cookie,
+        "User-Agent": UserAgent,
+    })
+
+    req.SetQueryParams(map[string]string{
+        "version_code":    "20800",
+        "device_platform": "web",
+    })
+
+    if callback != nil {
+        callback(req)
+    }
+
+    var commonResp CommonResp
+
+    res, err := req.Execute(method, reqUrl)
+    log.Debugln(res.String())
+    if err != nil {
+        return nil, err
+    }
+
+    body := res.Body()
+    // parse the generic envelope first
+    if err = json.Unmarshal(body, &commonResp); err != nil {
+        return nil, err
+    }
+    // then check whether the response indicates success
+    if !commonResp.IsSuccess() {
+        return body, commonResp.GetError()
+    }
+
+    if resp != nil {
+        if err = json.Unmarshal(body, resp); err != nil {
+            return body, err
+        }
+    }
+
+    return body, nil
+}
+
+func (d *DoubaoShare) getFiles(dirId, nodeId, cursor string) (resp []File, err error) {
+    var r NodeInfoResp
+
+    var body = base.Json{
+        "share_id": dirId,
+        "node_id":  nodeId,
+    }
+    // when a cursor is present, pass it along with the page size
+    if cursor != "" {
+        body["cursor"] = cursor
+        body["size"] = 50
+    } else {
+        body["need_full_path"] = false
+    }
+
+    _, err = d.request("/samantha/aispace/share/node_info", http.MethodPost, func(req *resty.Request) {
+        req.SetBody(body)
+    }, &r)
+    if err != nil {
+        return nil, err
+    }
+
+    if r.NodeInfoData.Children != nil {
+        resp = r.NodeInfoData.Children
+    }
+
+    if r.NodeInfoData.NextCursor != "-1" {
+        // recursively fetch the next page
+        nextFiles, err := d.getFiles(dirId, nodeId, r.NodeInfoData.NextCursor)
+        if err != nil {
+            return nil, err
+        }
+
+        resp = append(resp, nextFiles...)
+    }
+
+    return resp, err
+}
+
+func (d *DoubaoShare) getShareOverview(shareId, cursor string) (resp []File, err error) {
+    return d.getShareOverviewWithHistory(shareId, cursor, make(map[string]bool))
+}
+
+func (d *DoubaoShare) getShareOverviewWithHistory(shareId, cursor string, cursorHistory map[string]bool) (resp []File, err error) {
+    var r NodeInfoResp
+
+    var body = base.Json{
+        "share_id": shareId,
+    }
+    // when a cursor is present, pass it along with the page size
+    if cursor != "" {
+        body["cursor"] = cursor
+        body["size"] = 50
+    } else {
+        body["need_full_path"] = false
+    }
+
+    _, err = d.request("/samantha/aispace/share/overview", http.MethodPost, func(req *resty.Request) {
+        req.SetBody(body)
+    }, &r)
+    if err != nil {
+        return nil, err
+    }
+
+    if r.NodeInfoData.NodeList != nil {
+        resp = r.NodeInfoData.NodeList
+    }
+
+    if r.NodeInfoData.NextCursor != "-1" {
+        // bail out if this cursor has been seen before, to avoid an infinite loop
+        if cursorHistory[r.NodeInfoData.NextCursor] {
+            return resp, nil
+        }
+
+        // remember the current cursor
+        cursorHistory[r.NodeInfoData.NextCursor] = true
+
+        // recursively fetch the next page
+        nextFiles, err := d.getShareOverviewWithHistory(shareId, r.NodeInfoData.NextCursor, cursorHistory)
+        if err != nil {
+            return nil, err
+        }
+
+        resp = append(resp, nextFiles...)
+    }
+
+    return resp, nil
+}
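+
+// Illustrative share_ids configuration (hypothetical IDs): one entry per
+// line, either a bare share link/ID mounted at the root, or "ID|virtual/path"
+// to mount it under a virtual directory:
+//
+//	https://www.doubao.com/drive/s/abc123
+//	def456|docs/work
+//	ghi789|docs/personal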
+
+func (d *DoubaoShare) initShareList() error {
+    if d.Addition.ShareIds == "" {
+        return fmt.Errorf("share_ids is empty")
+    }
+
+    // parse the share configuration
+    shareConfigs, rootShares, err := d._parseShareConfigs()
+    if err != nil {
+        return err
+    }
+
+    // check for path conflicts
+    if err := d._detectPathConflicts(shareConfigs); err != nil {
+        return err
+    }
+
+    // build the tree structure
+    rootMap := d._buildTreeStructure(shareConfigs, rootShares)
+
+    // extract the top-level nodes
+    topLevelNodes := d._extractTopLevelNodes(rootMap, rootShares)
+    if len(topLevelNodes) == 0 {
+        return fmt.Errorf("no valid share_ids found")
+    }
+
+    // store the result
+    d.RootFiles = topLevelNodes
+
+    return nil
+}
+
+// _parseShareConfigs parses share IDs and paths from the configuration.
+func (d *DoubaoShare) _parseShareConfigs() (map[string]string, []string, error) {
+    shareConfigs := make(map[string]string) // path -> share ID
+    rootShares := make([]string, 0)         // share IDs shown at the root
+
+    lines := strings.Split(strings.TrimSpace(d.Addition.ShareIds), "\n")
+    if len(lines) == 0 {
+        return nil, nil, fmt.Errorf("no share_ids found")
+    }
+
+    for _, line := range lines {
+        line = strings.TrimSpace(line)
+        if line == "" {
+            continue
+        }
+
+        // split into share ID and path
+        parts := strings.Split(line, "|")
+        var shareId, sharePath string
+
+        if len(parts) == 1 {
+            // a share without a path is shown directly at the root
+            shareId = _extractShareId(parts[0])
+            if shareId != "" {
+                rootShares = append(rootShares, shareId)
+            }
+            continue
+        } else if len(parts) >= 2 {
+            shareId = _extractShareId(parts[0])
+            sharePath = strings.Trim(parts[1], "/")
+        }
+
+        if shareId == "" {
+            log.Warnf("[doubao_share] Invalid Share_id Format: %s", line)
+            continue
+        }
+
+        // an empty path also means the share is shown at the root
+        if sharePath == "" {
+            rootShares = append(rootShares, shareId)
+            continue
+        }
+
+        // add it to the path mapping
+        shareConfigs[sharePath] = shareId
+    }
+
+    return shareConfigs, rootShares, nil
+}
+
+// _detectPathConflicts checks the configured paths for conflicts.
+func (d *DoubaoShare) _detectPathConflicts(shareConfigs map[string]string) error {
+    // check for direct path conflicts
+    pathToShareIds := make(map[string][]string)
+    for sharePath, id := range shareConfigs {
+        pathToShareIds[sharePath] = append(pathToShareIds[sharePath], id)
+    }
+
+    for sharePath, ids := range pathToShareIds {
+        if len(ids) > 1 {
+            return fmt.Errorf("path conflict: path '%s' is used by multiple share IDs: %s",
+                sharePath, strings.Join(ids, ", "))
+        }
+    }
+
+    // check for hierarchy conflicts
+    for path1, id1 := range shareConfigs {
+        for path2, id2 := range shareConfigs {
+            if path1 == path2 || id1 == id2 {
+                continue
+            }
+
+            // check for prefix conflicts
+            if strings.HasPrefix(path2, path1+"/") || strings.HasPrefix(path1, path2+"/") {
+                return fmt.Errorf("path conflict: path '%s' (ID: %s) overlaps hierarchically with path '%s' (ID: %s)",
+                    path1, id1, path2, id2)
+            }
+        }
+    }
+
+    return nil
+}
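+
+// Illustrative conflicts (hypothetical IDs): "docs|id1" together with
+// "docs/reports|id2" is a hierarchy conflict because one mount point nests
+// inside the other, while mounting two different shares at the same "docs"
+// path would be a direct conflict.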
+
+// _buildTreeStructure builds the tree of virtual paths and shares.
+func (d *DoubaoShare) _buildTreeStructure(shareConfigs map[string]string, rootShares []string) map[string]*RootFileList {
+    rootMap := make(map[string]*RootFileList)
+
+    // add a node for every configured share
+    for sharePath, shareId := range shareConfigs {
+        children := make([]RootFileList, 0)
+        rootMap[sharePath] = &RootFileList{
+            ShareID:     shareId,
+            VirtualPath: sharePath,
+            NodeInfo:    NodeInfoData{},
+            Child:       &children,
+        }
+    }
+
+    // wire up the parent-child relationships
+    for sharePath, node := range rootMap {
+        if sharePath == "" {
+            continue
+        }
+
+        pathParts := strings.Split(sharePath, "/")
+        if len(pathParts) > 1 {
+            parentPath := strings.Join(pathParts[:len(pathParts)-1], "/")
+
+            // make sure every ancestor path exists
+            _ensurePathExists(rootMap, parentPath)
+
+            // attach the current node to its parent
+            if parent, exists := rootMap[parentPath]; exists {
+                *parent.Child = append(*parent.Child, *node)
+            }
+        }
+    }
+
+    return rootMap
+}
+
+// _extractTopLevelNodes extracts the top-level nodes of the tree.
+func (d *DoubaoShare) _extractTopLevelNodes(rootMap map[string]*RootFileList, rootShares []string) []RootFileList {
+    var topLevelNodes []RootFileList
+
+    // add the shares mounted at the root
+    for _, shareId := range rootShares {
+        children := make([]RootFileList, 0)
+        topLevelNodes = append(topLevelNodes, RootFileList{
+            ShareID:     shareId,
+            VirtualPath: "",
+            NodeInfo:    NodeInfoData{},
+            Child:       &children,
+        })
+    }
+
+    // add the top-level directories
+    for rootPath, node := range rootMap {
+        if rootPath == "" {
+            continue
+        }
+
+        isTopLevel := true
+        pathParts := strings.Split(rootPath, "/")
+
+        if len(pathParts) > 1 {
+            parentPath := strings.Join(pathParts[:len(pathParts)-1], "/")
+            if _, exists := rootMap[parentPath]; exists {
+                isTopLevel = false
+            }
+        }
+
+        if isTopLevel {
+            topLevelNodes = append(topLevelNodes, *node)
+        }
+    }
+
+    return topLevelNodes
+}
+
+// _ensurePathExists makes sure a path exists, creating any missing intermediate nodes.
+func _ensurePathExists(rootMap map[string]*RootFileList, path string) {
+    if path == "" {
+        return
+    }
+
+    // nothing to do if the path already exists
+    if _, exists := rootMap[path]; exists {
+        return
+    }
+
+    // create the node for the current path
+    children := make([]RootFileList, 0)
+    rootMap[path] = &RootFileList{
+        ShareID:     "",
+        VirtualPath: path,
+        NodeInfo:    NodeInfoData{},
+        Child:       &children,
+    }
+
+    // handle the parent path
+    pathParts := strings.Split(path, "/")
+    if len(pathParts) > 1 {
+        parentPath := strings.Join(pathParts[:len(pathParts)-1], "/")
+
+        // make sure the parent path exists
+        _ensurePathExists(rootMap, parentPath)
+
+        // attach the current node as a child of its parent
+        if parent, exists := rootMap[parentPath]; exists {
+            *parent.Child = append(*parent.Child, *rootMap[path])
+        }
+    }
+}
+
+// _extractShareId extracts the share ID from a URL, or returns a bare ID as-is.
+func _extractShareId(input string) string {
+    input = strings.TrimSpace(input)
+    if strings.HasPrefix(input, "http") {
+        regex := regexp.MustCompile(`/drive/s/([a-zA-Z0-9]+)`)
+        if matches := regex.FindStringSubmatch(input); len(matches) > 1 {
+            return matches[1]
+        }
+        return ""
+    }
+    return input // already a bare ID
+}
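+
+// Illustrative behaviour (hypothetical ID): _extractShareId returns "abc123"
+// both for "https://www.doubao.com/drive/s/abc123" and for the bare input
+// "abc123"; an http(s) URL without a /drive/s/ segment yields "".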
&rootFiles[i], "" + } + } + } + + return nil, "" +} + +// _findShareAndPath 根据给定路径查找对应的ShareID和相对路径 +func (d *DoubaoShare) _findShareAndPath(dir model.Obj) (string, string, error) { + dirPath := dir.GetPath() + + // 如果是根目录,返回空值表示需要列出所有分享 + if dirPath == "/" || dirPath == "" { + return "", "", nil + } + + // 检查是否是 FileObject 类型,并获取 ShareID + if fo, ok := dir.(*FileObject); ok && fo.ShareID != "" { + // 直接使用对象中存储的 ShareID + // 计算相对路径(移除前导斜杠) + relativePath := strings.TrimPrefix(dirPath, "/") + + // 递归查找对应的 RootFile + found := _findRootFileByShareID(d.RootFiles, fo.ShareID) + if found != nil { + if found.VirtualPath != "" { + // 如果此分享配置了路径前缀,需要考虑相对路径的计算 + if strings.HasPrefix(relativePath, found.VirtualPath) { + return fo.ShareID, strings.TrimPrefix(relativePath, found.VirtualPath+"/"), nil + } + } + return fo.ShareID, relativePath, nil + } + + // 如果找不到对应的 RootFile 配置,仍然使用对象中的 ShareID + return fo.ShareID, relativePath, nil + } + + // 移除开头的斜杠 + cleanPath := strings.TrimPrefix(dirPath, "/") + + // 先检查是否有直接匹配的根目录分享 + for _, rootFile := range d.RootFiles { + if rootFile.VirtualPath == "" && rootFile.ShareID != "" { + // 检查是否匹配当前路径的第一部分 + parts := strings.SplitN(cleanPath, "/", 2) + if len(parts) > 0 && parts[0] == rootFile.ShareID { + if len(parts) > 1 { + return rootFile.ShareID, parts[1], nil + } + return rootFile.ShareID, "", nil + } + } + } + + // 查找匹配此路径的分享或虚拟目录 + share, relPath := _findShareByPath(d.RootFiles, cleanPath) + if share != nil { + return share.ShareID, relPath, nil + } + + log.Warnf("[doubao_share] No matching share path found: %s", dirPath) + return "", "", fmt.Errorf("no matching share path found: %s", dirPath) +} + +// convertToFileObject 将File转换为FileObject +func (d *DoubaoShare) convertToFileObject(file File, shareId string, relativePath string) *FileObject { + // 构建文件对象 + obj := &FileObject{ + Object: model.Object{ + ID: file.ID, + Name: file.Name, + Size: file.Size, + Modified: time.Unix(file.UpdateTime, 0), + Ctime: time.Unix(file.CreateTime, 0), + IsFolder: file.NodeType == DirectoryType, + Path: path.Join(relativePath, file.Name), + }, + ShareID: shareId, + Key: file.Key, + NodeID: file.ID, + NodeType: file.NodeType, + } + + return obj +} + +// getFilesInPath 获取指定分享和路径下的文件 +func (d *DoubaoShare) getFilesInPath(ctx context.Context, shareId, nodeId, relativePath string) ([]model.Obj, error) { + var ( + files []File + err error + ) + + // 调用overview接口获取分享链接信息 nodeId + if nodeId == "" { + files, err = d.getShareOverview(shareId, "") + if err != nil { + return nil, fmt.Errorf("failed to get share link information: %w", err) + } + + result := make([]model.Obj, 0, len(files)) + for _, file := range files { + result = append(result, d.convertToFileObject(file, shareId, "/")) + } + + return result, nil + + } else { + files, err = d.getFiles(shareId, nodeId, "") + if err != nil { + return nil, fmt.Errorf("failed to get share file: %w", err) + } + + result := make([]model.Obj, 0, len(files)) + for _, file := range files { + result = append(result, d.convertToFileObject(file, shareId, path.Join("/", relativePath))) + } + + return result, nil + } +} + +// listRootDirectory 处理根目录的内容展示 +func (d *DoubaoShare) listRootDirectory(ctx context.Context) ([]model.Obj, error) { + objects := make([]model.Obj, 0) + + // 分组处理:直接显示的分享内容 vs 虚拟目录 + var directShareIDs []string + addedDirs := make(map[string]bool) + + // 处理所有根节点 + for _, rootFile := range d.RootFiles { + if rootFile.VirtualPath == "" && rootFile.ShareID != "" { + // 无路径分享,记录ShareID以便后续获取内容 + directShareIDs = append(directShareIDs, 
+
+// convertToFileObject converts a File into a FileObject.
+func (d *DoubaoShare) convertToFileObject(file File, shareId string, relativePath string) *FileObject {
+    // build the file object
+    obj := &FileObject{
+        Object: model.Object{
+            ID:       file.ID,
+            Name:     file.Name,
+            Size:     file.Size,
+            Modified: time.Unix(file.UpdateTime, 0),
+            Ctime:    time.Unix(file.CreateTime, 0),
+            IsFolder: file.NodeType == DirectoryType,
+            Path:     path.Join(relativePath, file.Name),
+        },
+        ShareID:  shareId,
+        Key:      file.Key,
+        NodeID:   file.ID,
+        NodeType: file.NodeType,
+    }
+
+    return obj
+}
+
+// getFilesInPath lists the files under the given share and path.
+func (d *DoubaoShare) getFilesInPath(ctx context.Context, shareId, nodeId, relativePath string) ([]model.Obj, error) {
+    var (
+        files []File
+        err   error
+    )
+
+    // without a nodeId, call the overview endpoint to get the share link info
+    if nodeId == "" {
+        files, err = d.getShareOverview(shareId, "")
+        if err != nil {
+            return nil, fmt.Errorf("failed to get share link information: %w", err)
+        }
+
+        result := make([]model.Obj, 0, len(files))
+        for _, file := range files {
+            result = append(result, d.convertToFileObject(file, shareId, "/"))
+        }
+
+        return result, nil
+    }
+
+    files, err = d.getFiles(shareId, nodeId, "")
+    if err != nil {
+        return nil, fmt.Errorf("failed to get share file: %w", err)
+    }
+
+    result := make([]model.Obj, 0, len(files))
+    for _, file := range files {
+        result = append(result, d.convertToFileObject(file, shareId, path.Join("/", relativePath)))
+    }
+
+    return result, nil
+}
+
+// listRootDirectory renders the contents of the root directory.
+func (d *DoubaoShare) listRootDirectory(ctx context.Context) ([]model.Obj, error) {
+    objects := make([]model.Obj, 0)
+
+    // split the work: shares shown directly vs. virtual directories
+    var directShareIDs []string
+    addedDirs := make(map[string]bool)
+
+    // walk all root nodes
+    for _, rootFile := range d.RootFiles {
+        if rootFile.VirtualPath == "" && rootFile.ShareID != "" {
+            // a share without a path; remember its ShareID to fetch its contents below
+            directShareIDs = append(directShareIDs, rootFile.ShareID)
+        } else {
+            // a share with a path; show its first-level directory
+            parts := strings.SplitN(rootFile.VirtualPath, "/", 2)
+            firstLevel := parts[0]
+
+            // avoid adding the same directory twice
+            if _, exists := addedDirs[firstLevel]; exists {
+                continue
+            }
+
+            // create the virtual directory object
+            obj := &FileObject{
+                Object: model.Object{
+                    ID:       "",
+                    Name:     firstLevel,
+                    Modified: time.Now(),
+                    Ctime:    time.Now(),
+                    IsFolder: true,
+                    Path:     path.Join("/", firstLevel),
+                },
+                ShareID:  rootFile.ShareID,
+                Key:      "",
+                NodeID:   "",
+                NodeType: DirectoryType,
+            }
+            objects = append(objects, obj)
+            addedDirs[firstLevel] = true
+        }
+    }
+
+    // fetch the contents of the shares shown directly at the root
+    for _, shareID := range directShareIDs {
+        shareFiles, err := d.getFilesInPath(ctx, shareID, "", "")
+        if err != nil {
+            log.Warnf("[doubao_share] Failed to get list of files in share %s: %s", shareID, err)
+            continue
+        }
+        objects = append(objects, shareFiles...)
+    }
+
+    return objects, nil
+}
+
+// listVirtualDirectoryContent lists the contents of a virtual directory.
+func (d *DoubaoShare) listVirtualDirectoryContent(dir model.Obj) ([]model.Obj, error) {
+    dirPath := strings.TrimPrefix(dir.GetPath(), "/")
+    objects := make([]model.Obj, 0)
+
+    // recursively look up the node for this path
+    node := _findNodeByPath(d.RootFiles, dirPath)
+
+    if node != nil && node.Child != nil {
+        // show all children of this node
+        for _, child := range *node.Child {
+            // the display name is the last segment of the path
+            displayName := child.VirtualPath
+            if child.VirtualPath != "" {
+                parts := strings.Split(child.VirtualPath, "/")
+                displayName = parts[len(parts)-1]
+            } else if child.ShareID != "" {
+                displayName = child.ShareID
+            }
+
+            obj := &FileObject{
+                Object: model.Object{
+                    ID:       "",
+                    Name:     displayName,
+                    Modified: time.Now(),
+                    Ctime:    time.Now(),
+                    IsFolder: true,
+                    Path:     path.Join("/", child.VirtualPath),
+                },
+                ShareID:  child.ShareID,
+                Key:      "",
+                NodeID:   "",
+                NodeType: DirectoryType,
+            }
+            objects = append(objects, obj)
+        }
+    }
+
+    return objects, nil
+}
+
+// generateContentDisposition builds an RFC 5987 compliant Content-Disposition header.
+func generateContentDisposition(filename string) string {
+    // percent-encode the plain filename parameter
+    encodedName := urlEncode(filename)
+
+    // encode the filename* parameter per RFC 5987
+    encodedNameRFC5987 := encodeRFC5987(filename)
+
+    return fmt.Sprintf("attachment; filename=\"%s\"; filename*=utf-8''%s",
+        encodedName, encodedNameRFC5987)
+}
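+
+// Illustrative output (hypothetical filename): for "报告.pdf" both encodings
+// yield "%E6%8A%A5%E5%91%8A.pdf", so the header becomes
+//
+//	attachment; filename="%E6%8A%A5%E5%91%8A.pdf"; filename*=utf-8''%E6%8A%A5%E5%91%8A.pdf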
+
+// encodeRFC5987 encodes a string per RFC 5987, for non-ASCII characters in
+// HTTP header parameters.
+func encodeRFC5987(s string) string {
+    var buf strings.Builder
+    for _, r := range []byte(s) {
+        // per RFC 5987, only letters, digits and a few special characters
+        // may stay unencoded
+        if (r >= 'a' && r <= 'z') ||
+            (r >= 'A' && r <= 'Z') ||
+            (r >= '0' && r <= '9') ||
+            r == '-' || r == '.' || r == '_' || r == '~' {
+            buf.WriteByte(r)
+        } else {
+            // every other byte is percent-encoded
+            fmt.Fprintf(&buf, "%%%02X", r)
+        }
+    }
+    return buf.String()
+}
+
+func urlEncode(s string) string {
+    s = url.QueryEscape(s)
+    s = strings.ReplaceAll(s, "+", "%20")
+    return s
+}
diff --git a/drivers/dropbox/driver.go b/drivers/dropbox/driver.go
index 9b1717b0..fbaecc4a 100644
--- a/drivers/dropbox/driver.go
+++ b/drivers/dropbox/driver.go
@@ -191,7 +191,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
         }
         url := d.contentBase + "/2/files/upload_session/append_v2"
-        reader := io.LimitReader(stream, PartSize)
+        reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
         req, err := http.NewRequest(http.MethodPost, url, reader)
         if err != nil {
             log.Errorf("failed to update file when append to upload session, err: %+v", err)
@@ -219,13 +219,8 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
             return err
         }
         _ = res.Body.Close()
-
-        if count > 0 {
-            up(float64(i+1) * 100 / float64(count))
-        }
-
+        up(float64(i+1) * 100 / float64(count))
         offset += byteSize
-
     }
     // 3.finish
     toPath := dstDir.GetPath() + "/" + stream.GetName()
diff --git a/drivers/febbox/driver.go b/drivers/febbox/driver.go
new file mode 100644
index 00000000..55c3aa21
--- /dev/null
+++ b/drivers/febbox/driver.go
@@ -0,0 +1,132 @@
+package febbox
+
+import (
+    "context"
+
+    "github.com/alist-org/alist/v3/internal/driver"
+    "github.com/alist-org/alist/v3/internal/errs"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/internal/op"
+    "github.com/alist-org/alist/v3/pkg/utils"
+    "golang.org/x/oauth2"
+    "golang.org/x/oauth2/clientcredentials"
+)
+
+type FebBox struct {
+    model.Storage
+    Addition
+    accessToken string
+    oauth2Token oauth2.TokenSource
+}
+
+func (d *FebBox) Config() driver.Config {
+    return config
+}
+
+func (d *FebBox) GetAddition() driver.Additional {
+    return &d.Addition
+}
+
+func (d *FebBox) Init(ctx context.Context) error {
+    // set up the oauth2 client-credentials config
+    oauth2Config := &clientcredentials.Config{
+        ClientID:     d.ClientID,
+        ClientSecret: d.ClientSecret,
+        AuthStyle:    oauth2.AuthStyleInParams,
+        TokenURL:     "https://api.febbox.com/oauth/token",
+    }
+
+    d.initializeOAuth2Token(ctx, oauth2Config, d.Addition.RefreshToken)
+
+    token, err := d.oauth2Token.Token()
+    if err != nil {
+        return err
+    }
+    d.accessToken = token.AccessToken
+    d.Addition.RefreshToken = token.RefreshToken
+    op.MustSaveDriverStorage(d)
+
+    return nil
+}
+
+func (d *FebBox) Drop(ctx context.Context) error {
+    return nil
+}
+
+func (d *FebBox) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+    files, err := d.getFilesList(dir.GetID())
+    if err != nil {
+        return nil, err
+    }
+    return utils.SliceConvert(files, func(src File) (model.Obj, error) {
+        return fileToObj(src), nil
+    })
+}
+
+func (d *FebBox) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+    var ip string
+    if d.Addition.UserIP != "" {
+        ip = d.Addition.UserIP
+    } else {
+        ip = args.IP
+    }
+
+    url, err := d.getDownloadLink(file.GetID(), ip)
+    if err != nil {
+        return nil, err
+    }
+    return &model.Link{
+        URL: url,
+    }, nil
+}
+
+func (d *FebBox) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
+    err := d.makeDir(parentDir.GetID(), dirName)
+    if err != nil {
+        return nil, err
+    }
+
+    return nil, nil
+}
+
+func (d *FebBox) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+    err := d.move(srcObj.GetID(), dstDir.GetID())
+    if err != nil {
+        return nil, err
+    }
+
+    return nil, nil
+}
+
+func (d *FebBox) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
+    err := d.rename(srcObj.GetID(), newName)
+    if err != nil {
+        return nil, err
+    }
+
+    return nil, nil
+}
+
+func (d *FebBox) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+    err := d.copy(srcObj.GetID(), dstDir.GetID())
+    if err != nil {
+        return nil, err
+    }
+
+    return nil, nil
+}
+
+func (d *FebBox) Remove(ctx context.Context, obj model.Obj) error {
+    err := d.remove(obj.GetID())
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func (d *FebBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+    return nil, errs.NotImplement
+}
+
+var _ driver.Driver = (*FebBox)(nil)
diff --git a/drivers/febbox/meta.go b/drivers/febbox/meta.go
new file mode 100644
index 00000000..1daeeea8
--- /dev/null
+++ b/drivers/febbox/meta.go
@@ -0,0 +1,36 @@
+package febbox
+
+import (
+    "github.com/alist-org/alist/v3/internal/driver"
+    "github.com/alist-org/alist/v3/internal/op"
+)
+
+type Addition struct {
+    driver.RootID
+    ClientID     string `json:"client_id" required:"true" default:""`
+    ClientSecret string `json:"client_secret" required:"true" default:""`
+    RefreshToken string
+    SortRule     string `json:"sort_rule" required:"true" type:"select" options:"size_asc,size_desc,name_asc,name_desc,update_asc,update_desc,ext_asc,ext_desc" default:"name_asc"`
+    PageSize     int64  `json:"page_size" required:"true" type:"number" default:"100" help:"list api per page size of FebBox driver"`
+    UserIP       string `json:"user_ip" default:"" help:"user ip address for download link which can speed up the download"`
+}
+
+var config = driver.Config{
+    Name:              "FebBox",
+    LocalSort:         false,
+    OnlyLocal:         false,
+    OnlyProxy:         false,
+    NoCache:           false,
+    NoUpload:          true,
+    NeedMs:            false,
+    DefaultRoot:       "0",
+    CheckStatus:       false,
+    Alert:             "",
+    NoOverwriteUpload: false,
+}
+
+func init() {
+    op.RegisterDriver(func() driver.Driver {
+        return &FebBox{}
+    })
+}
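+
+// Illustrative addition values (hypothetical credentials): a storage entry
+// for this driver might be configured as
+//
+//	{"client_id": "abc", "client_secret": "xyz", "sort_rule": "name_asc", "page_size": 100}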
diff --git a/drivers/febbox/oauth2.go b/drivers/febbox/oauth2.go
new file mode 100644
index 00000000..6345d1a7
--- /dev/null
+++ b/drivers/febbox/oauth2.go
@@ -0,0 +1,88 @@
+package febbox
+
+import (
+    "context"
+    "encoding/json"
+    "errors"
+    "net/http"
+    "net/url"
+    "strings"
+    "time"
+
+    "golang.org/x/oauth2"
+    "golang.org/x/oauth2/clientcredentials"
+)
+
+type customTokenSource struct {
+    config       *clientcredentials.Config
+    ctx          context.Context
+    refreshToken string
+}
+
+func (c *customTokenSource) Token() (*oauth2.Token, error) {
+    v := url.Values{}
+    if c.refreshToken != "" {
+        v.Set("grant_type", "refresh_token")
+        v.Set("refresh_token", c.refreshToken)
+    } else {
+        v.Set("grant_type", "client_credentials")
+    }
+
+    v.Set("client_id", c.config.ClientID)
+    v.Set("client_secret", c.config.ClientSecret)
+
+    req, err := http.NewRequest(http.MethodPost, c.config.TokenURL, strings.NewReader(v.Encode()))
+    if err != nil {
+        return nil, err
+    }
+    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+    resp, err := http.DefaultClient.Do(req.WithContext(c.ctx))
+    if err != nil {
+        return nil, err
+    }
+    defer resp.Body.Close()
+
+    if resp.StatusCode != http.StatusOK {
+        return nil, errors.New("oauth2: cannot fetch token")
+    }
+
+    var tokenResp struct {
+        Code int    `json:"code"`
+        Msg  string `json:"msg"`
+        Data struct {
+            AccessToken  string `json:"access_token"`
+            ExpiresIn    int64  `json:"expires_in"`
+            TokenType    string `json:"token_type"`
+            Scope        string `json:"scope"`
+            RefreshToken string `json:"refresh_token"`
+        } `json:"data"`
+    }
+
+    if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil {
+        return nil, err
+    }
+
+    if tokenResp.Code != 1 {
+        return nil, errors.New("oauth2: server response error")
+    }
+
+    c.refreshToken = tokenResp.Data.RefreshToken
+
+    token := &oauth2.Token{
+        AccessToken:  tokenResp.Data.AccessToken,
+        TokenType:    tokenResp.Data.TokenType,
+        RefreshToken: tokenResp.Data.RefreshToken,
+        Expiry:       time.Now().Add(time.Duration(tokenResp.Data.ExpiresIn) * time.Second),
+    }
+
+    return token, nil
+}
+
+func (d *FebBox) initializeOAuth2Token(ctx context.Context, oauth2Config *clientcredentials.Config, refreshToken string) {
+    d.oauth2Token = oauth2.ReuseTokenSource(nil, &customTokenSource{
+        config:       oauth2Config,
+        ctx:          ctx,
+        refreshToken: refreshToken,
+    })
+}
diff --git a/drivers/febbox/types.go b/drivers/febbox/types.go
new file mode 100644
index 00000000..2ac6d6b7
--- /dev/null
+++ b/drivers/febbox/types.go
@@ -0,0 +1,123 @@
+package febbox
+
+import (
+    "fmt"
+    "strconv"
+    "time"
+
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/utils"
+    hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
+)
+
+type ErrResp struct {
+    ErrorCode     int64   `json:"code"`
+    ErrorMsg      string  `json:"msg"`
+    ServerRunTime float64 `json:"server_runtime"`
+    ServerName    string  `json:"server_name"`
+}
+
+func (e *ErrResp) IsError() bool {
+    return e.ErrorCode != 0 || e.ErrorMsg != "" || e.ServerRunTime != 0 || e.ServerName != ""
+}
+
+func (e *ErrResp) Error() string {
+    return fmt.Sprintf("ErrorCode: %d ,Error: %s ,ServerRunTime: %f ,ServerName: %s", e.ErrorCode, e.ErrorMsg, e.ServerRunTime, e.ServerName)
+}
+
+type FileListResp struct {
+    Code int    `json:"code"`
+    Msg  string `json:"msg"`
+    Data struct {
+        FileList []File `json:"file_list"`
+        ShowType string `json:"show_type"`
+    } `json:"data"`
+}
+
+type Rules struct {
+    AllowCopy     int64 `json:"allow_copy"`
+    AllowDelete   int64 `json:"allow_delete"`
+    AllowDownload int64 `json:"allow_download"`
+    AllowComment  int64 `json:"allow_comment"`
+    HideLocation  int64 `json:"hide_location"`
+}
+
+type File struct {
+    Fid              int64  `json:"fid"`
+    UID              int64  `json:"uid"`
+    FileSize         int64  `json:"file_size"`
+    Path             string `json:"path"`
+    FileName         string `json:"file_name"`
+    Ext              string `json:"ext"`
+    AddTime          int64  `json:"add_time"`
+    FileCreateTime   int64  `json:"file_create_time"`
+    FileUpdateTime   int64  `json:"file_update_time"`
+    ParentID         int64  `json:"parent_id"`
+    UpdateTime       int64  `json:"update_time"`
+    LastOpenTime     int64  `json:"last_open_time"`
+    IsDir            int64  `json:"is_dir"`
+    Epub             int64  `json:"epub"`
+    IsMusicList      int64  `json:"is_music_list"`
+    OssFid           int64  `json:"oss_fid"`
+    Faststart        int64  `json:"faststart"`
+    HasVideoQuality  int64  `json:"has_video_quality"`
+    TotalDownload    int64  `json:"total_download"`
+    Status           int64  `json:"status"`
+    Remark           string `json:"remark"`
+    OldHash          string `json:"old_hash"`
+    Hash             string `json:"hash"`
+    HashType         string `json:"hash_type"`
+    FromUID          int64  `json:"from_uid"`
+    FidOrg           int64  `json:"fid_org"`
+    ShareID          int64  `json:"share_id"`
+    InvitePermission int64  `json:"invite_permission"`
+    ThumbSmall       string `json:"thumb_small"`
+    ThumbSmallWidth  int64  `json:"thumb_small_width"`
+    ThumbSmallHeight int64  `json:"thumb_small_height"`
+    Thumb            string `json:"thumb"`
+    ThumbWidth       int64  `json:"thumb_width"`
+    ThumbHeight      int64  `json:"thumb_height"`
+    ThumbBig         string `json:"thumb_big"`
+    ThumbBigWidth    int64  `json:"thumb_big_width"`
+    ThumbBigHeight   int64  `json:"thumb_big_height"`
+    IsCustomThumb    int64  `json:"is_custom_thumb"`
+    Photos           int64  `json:"photos"`
+    IsAlbum          int64  `json:"is_album"`
+    ReadOnly         int64  `json:"read_only"`
+    Rules            Rules  `json:"rules"`
+    IsShared         int64  `json:"is_shared"`
+}
+
+func fileToObj(f File) *model.ObjThumb {
+    return &model.ObjThumb{
+        Object: model.Object{
+            ID:       strconv.FormatInt(f.Fid, 10),
+            Name:     f.FileName,
+            Size:     f.FileSize,
+            Ctime:    time.Unix(f.FileCreateTime, 0),
+            Modified: time.Unix(f.FileUpdateTime, 0),
+            IsFolder: f.IsDir == 1,
+            HashInfo: utils.NewHashInfo(hash_extend.GCID, f.Hash),
+        },
+        Thumbnail: model.Thumbnail{
+            Thumbnail: f.Thumb,
+        },
+    }
+}
+
+type FileDownloadResp struct {
+    Code int    `json:"code"`
+    Msg  string `json:"msg"`
+    Data []struct {
+        Error       int    `json:"error"`
+        DownloadURL string `json:"download_url"`
+        Hash        string `json:"hash"`
+        HashType    string `json:"hash_type"`
+        Fid         int    `json:"fid"`
+        FileName    string `json:"file_name"`
+        ParentID    int    `json:"parent_id"`
+        FileSize    int    `json:"file_size"`
+        Ext         string `json:"ext"`
+        Thumb       string `json:"thumb"`
+        VipLink     int    `json:"vip_link"`
+    } `json:"data"`
+}
diff --git a/drivers/febbox/util.go b/drivers/febbox/util.go
new file mode 100644
index 00000000..ad2efe07
--- /dev/null
+++ b/drivers/febbox/util.go
@@ -0,0 +1,228 @@
+package febbox
+
+import (
+    "encoding/json"
+    "errors"
+    "fmt"
+    "net/http"
+    "strconv"
+
+    "github.com/alist-org/alist/v3/drivers/base"
+    "github.com/alist-org/alist/v3/internal/op"
+    "github.com/go-resty/resty/v2"
+)
+
+func (d *FebBox) refreshTokenByOAuth2() error {
+    token, err := d.oauth2Token.Token()
+    if err != nil {
+        return err
+    }
+    d.Status = "work"
+    d.accessToken = token.AccessToken
+    d.Addition.RefreshToken = token.RefreshToken
+    op.MustSaveDriverStorage(d)
+    return nil
+}
+
+func (d *FebBox) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+    req := base.RestyClient.R()
+    // fetch the access_token via oauth2
+    token, err := d.oauth2Token.Token()
+    if err != nil {
+        return nil, err
+    }
+    req.SetAuthScheme(token.TokenType).SetAuthToken(token.AccessToken)
+
+    if callback != nil {
+        callback(req)
+    }
+    if resp != nil {
+        req.SetResult(resp)
+    }
+    var e ErrResp
+    req.SetError(&e)
+    res, err := req.Execute(method, url)
+    if err != nil {
+        return nil, err
+    }
+
+    switch e.ErrorCode {
+    case 0, 1:
+        return res.Body(), nil
+    case -10001:
+        if e.ServerName != "" {
+            // the access_token has expired; refresh it and retry
+            if err = d.refreshTokenByOAuth2(); err != nil {
+                return nil, err
+            }
+            return d.request(url, method, callback, resp)
+        }
+        return nil, errors.New(e.Error())
+    default:
+        return nil, errors.New(e.Error())
+    }
+}
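+
+// Illustrative call (a sketch; the variable names are hypothetical): every
+// API operation goes through the same /oauth endpoint with a multipart form:
+//
+//	var list FileListResp
+//	_, err := d.request("https://api.febbox.com/oauth", http.MethodPost,
+//		func(req *resty.Request) {
+//			req.SetMultipartFormData(map[string]string{
+//				"module":    "file_list",
+//				"parent_id": "0",
+//			})
+//		}, &list)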
+
+func (d *FebBox) getFilesList(id string) ([]File, error) {
+    if d.PageSize <= 0 {
+        d.PageSize = 100
+    }
+    res, err := d.listWithLimit(id, d.PageSize)
+    if err != nil {
+        return nil, err
+    }
+    return *res, nil
+}
+
+func (d *FebBox) listWithLimit(dirID string, pageLimit int64) (*[]File, error) {
+    var files []File
+    page := int64(1)
+    for {
+        result, err := d.getFiles(dirID, page, pageLimit)
+        if err != nil {
+            return nil, err
+        }
+        files = append(files, *result...)
+        if int64(len(*result)) < pageLimit {
+            break
+        }
+        page++
+    }
+    return &files, nil
+}
+
+func (d *FebBox) getFiles(dirID string, page, pageLimit int64) (*[]File, error) {
+    var fileList FileListResp
+    queryParams := map[string]string{
+        "module":    "file_list",
+        "parent_id": dirID,
+        "page":      strconv.FormatInt(page, 10),
+        "pagelimit": strconv.FormatInt(pageLimit, 10),
+        "order":     d.Addition.SortRule,
+    }
+
+    res, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
+        req.SetMultipartFormData(queryParams)
+    }, &fileList)
+    if err != nil {
+        return nil, err
+    }
+
+    if err = json.Unmarshal(res, &fileList); err != nil {
+        return nil, err
+    }
+
+    return &fileList.Data.FileList, nil
+}
+
+func (d *FebBox) getDownloadLink(id string, ip string) (string, error) {
+    var fileDownloadResp FileDownloadResp
+    queryParams := map[string]string{
+        "module": "file_get_download_url",
+        "fids[]": id,
+        "ip":     ip,
+    }
+
+    res, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
+        req.SetMultipartFormData(queryParams)
+    }, &fileDownloadResp)
+    if err != nil {
+        return "", err
+    }
+
+    if err = json.Unmarshal(res, &fileDownloadResp); err != nil {
+        return "", err
+    }
+    if len(fileDownloadResp.Data) == 0 {
+        return "", fmt.Errorf("can not get download link, code:%d, msg:%s", fileDownloadResp.Code, fileDownloadResp.Msg)
+    }
+
+    return fileDownloadResp.Data[0].DownloadURL, nil
+}
+
+func (d *FebBox) makeDir(id string, name string) error {
+    queryParams := map[string]string{
+        "module":    "create_dir",
+        "parent_id": id,
+        "name":      name,
+    }
+
+    _, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
+        req.SetMultipartFormData(queryParams)
+    }, nil)
+    return err
+}
+
+func (d *FebBox) move(id string, id2 string) error {
+    queryParams := map[string]string{
+        "module": "file_move",
+        "fids[]": id,
+        "to":     id2,
+    }
+
+    _, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
+        req.SetMultipartFormData(queryParams)
+    }, nil)
+    return err
+}
+
+func (d *FebBox) rename(id string, name string) error {
+    queryParams := map[string]string{
+        "module": "file_rename",
+        "fid":    id,
+        "name":   name,
+    }
+
+    _, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
+        req.SetMultipartFormData(queryParams)
+    }, nil)
+    return err
+}
+
+func (d *FebBox) copy(id string, id2 string) error {
+    queryParams := map[string]string{
+        "module": "file_copy",
+        "fids[]": id,
+        "to":     id2,
+    }
+
+    _, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
+        req.SetMultipartFormData(queryParams)
+    }, nil)
+    return err
+}
+
+func (d *FebBox) remove(id string) error {
+    queryParams := map[string]string{
+        "module": "file_delete",
+        "fids[]": id,
+    }
+
+    _, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
+        req.SetMultipartFormData(queryParams)
+    }, nil)
+    return err
+}
diff --git a/drivers/ftp/driver.go b/drivers/ftp/driver.go
index 05b9e49a..8f30b780 100644
--- a/drivers/ftp/driver.go
+++ b/drivers/ftp/driver.go
@@ -114,13 +114,15 @@ func (d *FTP) Remove(ctx context.Context, obj model.Obj) error {
     }
 }
 
-func (d *FTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *FTP) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
     if err := d.login(); err != nil {
         return err
     }
-    // TODO: support cancel
-    path := stdpath.Join(dstDir.GetPath(), stream.GetName())
-    return d.conn.Stor(encode(path, d.Encoding), stream)
+    path := stdpath.Join(dstDir.GetPath(), s.GetName())
+    return d.conn.Stor(encode(path, d.Encoding), driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+        Reader:         s,
+        UpdateProgress: up,
+    }))
 }
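+
+// Note on the wrapper used in Put above (a sketch of intent, not an API
+// contract): ReaderUpdatingProgress reports read progress to `up`, and
+// NewLimitedUploadStream applies the configured upload rate limit and is tied
+// to ctx, which is what replaces the removed "TODO: support cancel".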
 
 var _ driver.Driver = (*FTP)(nil)
diff --git a/drivers/github/driver.go b/drivers/github/driver.go
new file mode 100644
index 00000000..dedd4945
--- /dev/null
+++ b/drivers/github/driver.go
@@ -0,0 +1,975 @@
+package github
+
+import (
+    "context"
+    "encoding/base64"
+    "fmt"
+    "io"
+    "net/http"
+    stdpath "path"
+    "strings"
+    "sync"
+    "text/template"
+
+    "github.com/ProtonMail/go-crypto/openpgp"
+    "github.com/alist-org/alist/v3/drivers/base"
+    "github.com/alist-org/alist/v3/internal/driver"
+    "github.com/alist-org/alist/v3/internal/errs"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/utils"
+    "github.com/go-resty/resty/v2"
+    "github.com/pkg/errors"
+    log "github.com/sirupsen/logrus"
+)
+
+type Github struct {
+    model.Storage
+    Addition
+    client        *resty.Client
+    mkdirMsgTmpl  *template.Template
+    deleteMsgTmpl *template.Template
+    putMsgTmpl    *template.Template
+    renameMsgTmpl *template.Template
+    copyMsgTmpl   *template.Template
+    moveMsgTmpl   *template.Template
+    isOnBranch    bool
+    commitMutex   sync.Mutex
+    pgpEntity     *openpgp.Entity
+}
+
+func (d *Github) Config() driver.Config {
+    return config
+}
+
+func (d *Github) GetAddition() driver.Additional {
+    return &d.Addition
+}
+
+func (d *Github) Init(ctx context.Context) error {
+    d.RootFolderPath = utils.FixAndCleanPath(d.RootFolderPath)
+    if d.CommitterName != "" && d.CommitterEmail == "" {
+        return errors.New("committer email is required")
+    }
+    if d.CommitterName == "" && d.CommitterEmail != "" {
+        return errors.New("committer name is required")
+    }
+    if d.AuthorName != "" && d.AuthorEmail == "" {
+        return errors.New("author email is required")
+    }
+    if d.AuthorName == "" && d.AuthorEmail != "" {
+        return errors.New("author name is required")
+    }
+    var err error
+    d.mkdirMsgTmpl, err = template.New("mkdirCommitMsgTemplate").Parse(d.MkdirCommitMsg)
+    if err != nil {
+        return err
+    }
+    d.deleteMsgTmpl, err = template.New("deleteCommitMsgTemplate").Parse(d.DeleteCommitMsg)
+    if err != nil {
+        return err
+    }
+    d.putMsgTmpl, err = template.New("putCommitMsgTemplate").Parse(d.PutCommitMsg)
+    if err != nil {
+        return err
+    }
+    d.renameMsgTmpl, err = template.New("renameCommitMsgTemplate").Parse(d.RenameCommitMsg)
+    if err != nil {
+        return err
+    }
+    d.copyMsgTmpl, err = template.New("copyCommitMsgTemplate").Parse(d.CopyCommitMsg)
+    if err != nil {
+        return err
+    }
+    d.moveMsgTmpl, err = template.New("moveCommitMsgTemplate").Parse(d.MoveCommitMsg)
+    if err != nil {
+        return err
+    }
+    d.client = base.NewRestyClient().
+        SetHeader("Accept", "application/vnd.github.object+json").
+        SetHeader("X-GitHub-Api-Version", "2022-11-28").
+        SetLogger(log.StandardLogger()).
+        SetDebug(false)
+    token := strings.TrimSpace(d.Token)
+    if token != "" {
+        d.client = d.client.SetHeader("Authorization", "Bearer "+token)
+    }
+    if d.Ref == "" {
+        repo, err := d.getRepo()
+        if err != nil {
+            return err
+        }
+        d.Ref = repo.DefaultBranch
+        d.isOnBranch = true
+    } else {
+        _, err = d.getBranchHead()
+        d.isOnBranch = err == nil
+    }
+    if d.GPGPrivateKey != "" {
+        if d.CommitterName == "" || d.AuthorName == "" {
+            user, e := d.getAuthenticatedUser()
+            if e != nil {
+                return e
+            }
+            if d.CommitterName == "" {
+                d.CommitterName = user.Name
+                d.CommitterEmail = user.Email
+            }
+            if d.AuthorName == "" {
+                d.AuthorName = user.Name
+                d.AuthorEmail = user.Email
+            }
+        }
+        d.pgpEntity, err = loadPrivateKey(d.GPGPrivateKey, d.GPGKeyPassphrase)
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func (d *Github) Drop(ctx context.Context) error {
+    return nil
+}
+
+func (d *Github) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+    obj, err := d.get(dir.GetPath())
+    if err != nil {
+        return nil, err
+    }
+    if obj.Entries == nil {
+        return nil, errs.NotFolder
+    }
+    if len(obj.Entries) >= 1000 {
+        tree, err := d.getTree(obj.Sha)
+        if err != nil {
+            return nil, err
+        }
+        if tree.Truncated {
+            return nil, fmt.Errorf("tree %s is truncated", dir.GetPath())
+        }
+        ret := make([]model.Obj, 0, len(tree.Trees))
+        for _, t := range tree.Trees {
+            if t.Path != ".gitkeep" {
+                ret = append(ret, t.toModelObj())
+            }
+        }
+        return ret, nil
+    }
+    ret := make([]model.Obj, 0, len(obj.Entries))
+    for _, entry := range obj.Entries {
+        if entry.Name != ".gitkeep" {
+            ret = append(ret, entry.toModelObj())
+        }
+    }
+    return ret, nil
+}
+
+func (d *Github) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+    obj, err := d.get(file.GetPath())
+    if err != nil {
+        return nil, err
+    }
+    if obj.Type == "submodule" {
+        return nil, errors.New("cannot download a submodule")
+    }
+    url := obj.DownloadURL
+    ghProxy := strings.TrimSpace(d.Addition.GitHubProxy)
+    if ghProxy != "" {
+        url = strings.Replace(url, "https://raw.githubusercontent.com", ghProxy, 1)
+    }
+    return &model.Link{
+        URL: url,
+    }, nil
+}
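+
+// Illustrative rewrite (hypothetical proxy): with gh_proxy set to
+// "https://ghproxy.net/raw.githubusercontent.com", a download URL such as
+// https://raw.githubusercontent.com/owner/repo/main/a.txt becomes
+// https://ghproxy.net/raw.githubusercontent.com/owner/repo/main/a.txt.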
"mkdir") + if err != nil { + return err + } + return d.commit(commitMessage, rootSha) +} + +func (d *Github) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + if !d.isOnBranch { + return errors.New("cannot write to non-branch reference") + } + if strings.HasPrefix(dstDir.GetPath(), srcObj.GetPath()) { + return errors.New("cannot move parent dir to child") + } + d.commitMutex.Lock() + defer d.commitMutex.Unlock() + + var rootSha string + if strings.HasPrefix(dstDir.GetPath(), stdpath.Dir(srcObj.GetPath())) { // /aa/1 -> /aa/bb/ + dstOldSha, dstNewSha, ancestorOldSha, srcParentTree, err := d.copyWithoutRenewTree(srcObj, dstDir) + if err != nil { + return err + } + + srcParentPath := stdpath.Dir(srcObj.GetPath()) + dstRest := dstDir.GetPath()[len(srcParentPath):] + if dstRest[0] == '/' { + dstRest = dstRest[1:] + } + dstNextName, _, _ := strings.Cut(dstRest, "/") + dstNextPath := stdpath.Join(srcParentPath, dstNextName) + dstNextTreeSha, err := d.renewParentTrees(dstDir.GetPath(), dstOldSha, dstNewSha, dstNextPath) + if err != nil { + return err + } + var delSrc, dstNextTree *TreeObjReq = nil, nil + for _, t := range srcParentTree.Trees { + if t.Path == dstNextName { + dstNextTree = &t.TreeObjReq + dstNextTree.Sha = dstNextTreeSha + } + if t.Path == srcObj.GetName() { + delSrc = &t.TreeObjReq + delSrc.Sha = nil + } + if delSrc != nil && dstNextTree != nil { + break + } + } + if delSrc == nil || dstNextTree == nil { + return errs.ObjectNotFound + } + ancestorNewSha, err := d.newTree(ancestorOldSha, []interface{}{*delSrc, *dstNextTree}) + if err != nil { + return err + } + rootSha, err = d.renewParentTrees(srcParentPath, ancestorOldSha, ancestorNewSha, "/") + if err != nil { + return err + } + } else if strings.HasPrefix(srcObj.GetPath(), dstDir.GetPath()) { // /aa/bb/1 -> /aa/ + srcParentPath := stdpath.Dir(srcObj.GetPath()) + srcParentTree, srcParentOldSha, err := d.getTreeDirectly(srcParentPath) + if err != nil { + return err + } + var src *TreeObjReq = nil + for _, t := range srcParentTree.Trees { + if t.Path == srcObj.GetName() { + if t.Type == "commit" { + return errors.New("cannot move a submodule") + } + src = &t.TreeObjReq + break + } + } + if src == nil { + return errs.ObjectNotFound + } + + delSrc := *src + delSrc.Sha = nil + delSrcTree := make([]interface{}, 0, 2) + delSrcTree = append(delSrcTree, delSrc) + if len(srcParentTree.Trees) == 1 { + delSrcTree = append(delSrcTree, map[string]string{ + "path": ".gitkeep", + "mode": "100644", + "type": "blob", + "content": "", + }) + } + srcParentNewSha, err := d.newTree(srcParentOldSha, delSrcTree) + if err != nil { + return err + } + srcRest := srcObj.GetPath()[len(dstDir.GetPath()):] + if srcRest[0] == '/' { + srcRest = srcRest[1:] + } + srcNextName, _, ok := strings.Cut(srcRest, "/") + if !ok { // /aa/1 -> /aa/ + return errors.New("cannot move in place") + } + srcNextPath := stdpath.Join(dstDir.GetPath(), srcNextName) + srcNextTreeSha, err := d.renewParentTrees(srcParentPath, srcParentOldSha, srcParentNewSha, srcNextPath) + if err != nil { + return err + } + + ancestorTree, ancestorOldSha, err := d.getTreeDirectly(dstDir.GetPath()) + if err != nil { + return err + } + var srcNextTree *TreeObjReq = nil + for _, t := range ancestorTree.Trees { + if t.Path == srcNextName { + srcNextTree = &t.TreeObjReq + srcNextTree.Sha = srcNextTreeSha + break + } + } + if srcNextTree == nil { + return errs.ObjectNotFound + } + ancestorNewSha, err := d.newTree(ancestorOldSha, []interface{}{*srcNextTree, *src}) + if err != nil { + return err + 
+
+func (d *Github) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if !d.isOnBranch {
+        return errors.New("cannot write to non-branch reference")
+    }
+    if strings.HasPrefix(dstDir.GetPath(), srcObj.GetPath()) {
+        return errors.New("cannot move parent dir to child")
+    }
+    d.commitMutex.Lock()
+    defer d.commitMutex.Unlock()
+
+    var rootSha string
+    if strings.HasPrefix(dstDir.GetPath(), stdpath.Dir(srcObj.GetPath())) { // /aa/1 -> /aa/bb/
+        dstOldSha, dstNewSha, ancestorOldSha, srcParentTree, err := d.copyWithoutRenewTree(srcObj, dstDir)
+        if err != nil {
+            return err
+        }
+
+        srcParentPath := stdpath.Dir(srcObj.GetPath())
+        dstRest := dstDir.GetPath()[len(srcParentPath):]
+        if dstRest[0] == '/' {
+            dstRest = dstRest[1:]
+        }
+        dstNextName, _, _ := strings.Cut(dstRest, "/")
+        dstNextPath := stdpath.Join(srcParentPath, dstNextName)
+        dstNextTreeSha, err := d.renewParentTrees(dstDir.GetPath(), dstOldSha, dstNewSha, dstNextPath)
+        if err != nil {
+            return err
+        }
+        var delSrc, dstNextTree *TreeObjReq = nil, nil
+        for _, t := range srcParentTree.Trees {
+            if t.Path == dstNextName {
+                dstNextTree = &t.TreeObjReq
+                dstNextTree.Sha = dstNextTreeSha
+            }
+            if t.Path == srcObj.GetName() {
+                delSrc = &t.TreeObjReq
+                delSrc.Sha = nil
+            }
+            if delSrc != nil && dstNextTree != nil {
+                break
+            }
+        }
+        if delSrc == nil || dstNextTree == nil {
+            return errs.ObjectNotFound
+        }
+        ancestorNewSha, err := d.newTree(ancestorOldSha, []interface{}{*delSrc, *dstNextTree})
+        if err != nil {
+            return err
+        }
+        rootSha, err = d.renewParentTrees(srcParentPath, ancestorOldSha, ancestorNewSha, "/")
+        if err != nil {
+            return err
+        }
+    } else if strings.HasPrefix(srcObj.GetPath(), dstDir.GetPath()) { // /aa/bb/1 -> /aa/
+        srcParentPath := stdpath.Dir(srcObj.GetPath())
+        srcParentTree, srcParentOldSha, err := d.getTreeDirectly(srcParentPath)
+        if err != nil {
+            return err
+        }
+        var src *TreeObjReq = nil
+        for _, t := range srcParentTree.Trees {
+            if t.Path == srcObj.GetName() {
+                if t.Type == "commit" {
+                    return errors.New("cannot move a submodule")
+                }
+                src = &t.TreeObjReq
+                break
+            }
+        }
+        if src == nil {
+            return errs.ObjectNotFound
+        }
+
+        delSrc := *src
+        delSrc.Sha = nil
+        delSrcTree := make([]interface{}, 0, 2)
+        delSrcTree = append(delSrcTree, delSrc)
+        if len(srcParentTree.Trees) == 1 {
+            delSrcTree = append(delSrcTree, map[string]string{
+                "path":    ".gitkeep",
+                "mode":    "100644",
+                "type":    "blob",
+                "content": "",
+            })
+        }
+        srcParentNewSha, err := d.newTree(srcParentOldSha, delSrcTree)
+        if err != nil {
+            return err
+        }
+        srcRest := srcObj.GetPath()[len(dstDir.GetPath()):]
+        if srcRest[0] == '/' {
+            srcRest = srcRest[1:]
+        }
+        srcNextName, _, ok := strings.Cut(srcRest, "/")
+        if !ok { // /aa/1 -> /aa/
+            return errors.New("cannot move in place")
+        }
+        srcNextPath := stdpath.Join(dstDir.GetPath(), srcNextName)
+        srcNextTreeSha, err := d.renewParentTrees(srcParentPath, srcParentOldSha, srcParentNewSha, srcNextPath)
+        if err != nil {
+            return err
+        }
+
+        ancestorTree, ancestorOldSha, err := d.getTreeDirectly(dstDir.GetPath())
+        if err != nil {
+            return err
+        }
+        var srcNextTree *TreeObjReq = nil
+        for _, t := range ancestorTree.Trees {
+            if t.Path == srcNextName {
+                srcNextTree = &t.TreeObjReq
+                srcNextTree.Sha = srcNextTreeSha
+                break
+            }
+        }
+        if srcNextTree == nil {
+            return errs.ObjectNotFound
+        }
+        ancestorNewSha, err := d.newTree(ancestorOldSha, []interface{}{*srcNextTree, *src})
+        if err != nil {
+            return err
+        }
+        rootSha, err = d.renewParentTrees(dstDir.GetPath(), ancestorOldSha, ancestorNewSha, "/")
+        if err != nil {
+            return err
+        }
+    } else { // /aa/1 -> /bb/
+        // do the copy
+        dstOldSha, dstNewSha, srcParentOldSha, srcParentTree, err := d.copyWithoutRenewTree(srcObj, dstDir)
+        if err != nil {
+            return err
+        }
+
+        // delete the src object and create a new tree
+        var srcNewTree *TreeObjReq = nil
+        for _, t := range srcParentTree.Trees {
+            if t.Path == srcObj.GetName() {
+                srcNewTree = &t.TreeObjReq
+                srcNewTree.Sha = nil
+                break
+            }
+        }
+        if srcNewTree == nil {
+            return errs.ObjectNotFound
+        }
+        delSrcTree := make([]interface{}, 0, 2)
+        delSrcTree = append(delSrcTree, *srcNewTree)
+        if len(srcParentTree.Trees) == 1 {
+            delSrcTree = append(delSrcTree, map[string]string{
+                "path":    ".gitkeep",
+                "mode":    "100644",
+                "type":    "blob",
+                "content": "",
+            })
+        }
+        srcParentNewSha, err := d.newTree(srcParentOldSha, delSrcTree)
+        if err != nil {
+            return err
+        }
+
+        // renew up to, but not including, the common ancestor of srcPath and dstPath
+        ancestor, srcChildName, dstChildName, _, _ := getPathCommonAncestor(srcObj.GetPath(), dstDir.GetPath())
+        dstNextTreeSha, err := d.renewParentTrees(dstDir.GetPath(), dstOldSha, dstNewSha, stdpath.Join(ancestor, dstChildName))
+        if err != nil {
+            return err
+        }
+        srcNextTreeSha, err := d.renewParentTrees(stdpath.Dir(srcObj.GetPath()), srcParentOldSha, srcParentNewSha, stdpath.Join(ancestor, srcChildName))
+        if err != nil {
+            return err
+        }
+
+        // renew the tree of the last common ancestor
+        ancestorTree, ancestorOldSha, err := d.getTreeDirectly(ancestor)
+        if err != nil {
+            return err
+        }
+        newTree := make([]interface{}, 2)
+        srcBind := false
+        dstBind := false
+        for _, t := range ancestorTree.Trees {
+            if t.Path == srcChildName {
+                t.Sha = srcNextTreeSha
+                newTree[0] = t.TreeObjReq
+                srcBind = true
+            }
+            if t.Path == dstChildName {
+                t.Sha = dstNextTreeSha
+                newTree[1] = t.TreeObjReq
+                dstBind = true
+            }
+            if srcBind && dstBind {
+                break
+            }
+        }
+        if !srcBind || !dstBind {
+            return errs.ObjectNotFound
+        }
+        ancestorNewSha, err := d.newTree(ancestorOldSha, newTree)
+        if err != nil {
+            return err
+        }
+        // renew up to the root
+        rootSha, err = d.renewParentTrees(ancestor, ancestorOldSha, ancestorNewSha, "/")
+        if err != nil {
+            return err
+        }
+    }
+
+    // commit
+    message, err := getMessage(d.moveMsgTmpl, &MessageTemplateVars{
+        UserName:   getUsername(ctx),
+        ObjName:    srcObj.GetName(),
+        ObjPath:    srcObj.GetPath(),
+        ParentName: stdpath.Base(stdpath.Dir(srcObj.GetPath())),
+        ParentPath: stdpath.Dir(srcObj.GetPath()),
+        TargetName: stdpath.Base(dstDir.GetPath()),
+        TargetPath: dstDir.GetPath(),
+    }, "move")
+    if err != nil {
+        return err
+    }
+    return d.commit(message, rootSha)
+}
+
+func (d *Github) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+    if !d.isOnBranch {
+        return errors.New("cannot write to non-branch reference")
+    }
+    d.commitMutex.Lock()
+    defer d.commitMutex.Unlock()
+    parentDir := stdpath.Dir(srcObj.GetPath())
+    tree, _, err := d.getTreeDirectly(parentDir)
+    if err != nil {
+        return err
+    }
+    newTree := make([]interface{}, 2)
+    operated := false
+    for _, t := range tree.Trees {
+        if t.Path == srcObj.GetName() {
+            if t.Type == "commit" {
+                return errors.New("cannot rename a submodule")
+            }
+            delCopy := t.TreeObjReq
+            delCopy.Sha = nil
+            newTree[0] = delCopy
+            t.Path = newName
+            newTree[1] = t.TreeObjReq
+            operated = true
+            break
+        }
+    }
+    if !operated {
+        return errs.ObjectNotFound
+    }
+    newSha, err := d.newTree(tree.Sha, newTree)
+    if err != nil {
+        return err
+    }
+    rootSha, err := d.renewParentTrees(parentDir, tree.Sha, newSha, "/")
+    if err != nil {
+        return err
+    }
+    message, err := getMessage(d.renameMsgTmpl, &MessageTemplateVars{
+        UserName:   getUsername(ctx),
+        ObjName:    srcObj.GetName(),
+        ObjPath:    srcObj.GetPath(),
+        ParentName: stdpath.Base(parentDir),
+        ParentPath: parentDir,
+        TargetName: newName,
+        TargetPath: stdpath.Join(parentDir, newName),
+    }, "rename")
+    if err != nil {
+        return err
+    }
+    return d.commit(message, rootSha)
+}
+
+func (d *Github) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if !d.isOnBranch {
+        return errors.New("cannot write to non-branch reference")
+    }
+    if strings.HasPrefix(dstDir.GetPath(), srcObj.GetPath()) {
+        return errors.New("cannot copy parent dir to child")
+    }
+    d.commitMutex.Lock()
+    defer d.commitMutex.Unlock()
+
+    dstSha, newSha, _, _, err := d.copyWithoutRenewTree(srcObj, dstDir)
+    if err != nil {
+        return err
+    }
+    rootSha, err := d.renewParentTrees(dstDir.GetPath(), dstSha, newSha, "/")
+    if err != nil {
+        return err
+    }
+    message, err := getMessage(d.copyMsgTmpl, &MessageTemplateVars{
+        UserName:   getUsername(ctx),
+        ObjName:    srcObj.GetName(),
+        ObjPath:    srcObj.GetPath(),
+        ParentName: stdpath.Base(stdpath.Dir(srcObj.GetPath())),
+        ParentPath: stdpath.Dir(srcObj.GetPath()),
+        TargetName: stdpath.Base(dstDir.GetPath()),
+        TargetPath: dstDir.GetPath(),
+    }, "copy")
+    if err != nil {
+        return err
+    }
+    return d.commit(message, rootSha)
+}
+
+func (d *Github) Remove(ctx context.Context, obj model.Obj) error {
+    if !d.isOnBranch {
+        return errors.New("cannot write to non-branch reference")
+    }
+    d.commitMutex.Lock()
+    defer d.commitMutex.Unlock()
+    parentDir := stdpath.Dir(obj.GetPath())
+    tree, treeSha, err := d.getTreeDirectly(parentDir)
+    if err != nil {
+        return err
+    }
+    var del *TreeObjReq = nil
+    for _, t := range tree.Trees {
+        if t.Path == obj.GetName() {
+            if t.Type == "commit" {
+                return errors.New("cannot remove a submodule")
+            }
+            del = &t.TreeObjReq
+            del.Sha = nil
+            break
+        }
+    }
+    if del == nil {
+        return errs.ObjectNotFound
+    }
+    newTree := make([]interface{}, 0, 2)
+    newTree = append(newTree, *del)
+    if len(tree.Trees) == 1 { // completely emptying the repository would get a 404
+        newTree = append(newTree, map[string]string{
+            "path":    ".gitkeep",
+            "mode":    "100644",
+            "type":    "blob",
+            "content": "",
+        })
+    }
+    newSha, err := d.newTree(treeSha, newTree)
+    if err != nil {
+        return err
+    }
+    rootSha, err := d.renewParentTrees(parentDir, treeSha, newSha, "/")
+    if err != nil {
+        return err
+    }
+    commitMessage, err := getMessage(d.deleteMsgTmpl, &MessageTemplateVars{
+        UserName:   getUsername(ctx),
+        ObjName:    obj.GetName(),
+        ObjPath:    obj.GetPath(),
+        ParentName: stdpath.Base(parentDir),
+        ParentPath: parentDir,
+    }, "remove")
+    if err != nil {
+        return err
+    }
+    return d.commit(commitMessage, rootSha)
+}
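+
+// Illustrative rendering (hypothetical values): with the default template
+// "{{.UserName}} upload {{.ObjPath}}", UserName "admin" and ObjPath
+// "/docs/a.txt", the commit message becomes "admin upload /docs/a.txt".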
== ".gitkeep" { + newTree = append(newTree, TreeObjReq{ + Path: ".gitkeep", + Mode: "100644", + Type: "blob", + Sha: nil, + }) + } + newSha, err := d.newTree(parent.Sha, newTree) + if err != nil { + return err + } + rootSha, err := d.renewParentTrees(dstDir.GetPath(), parent.Sha, newSha, "/") + if err != nil { + return err + } + + commitMessage, err := getMessage(d.putMsgTmpl, &MessageTemplateVars{ + UserName: getUsername(ctx), + ObjName: stream.GetName(), + ObjPath: stdpath.Join(dstDir.GetPath(), stream.GetName()), + ParentName: dstDir.GetName(), + ParentPath: dstDir.GetPath(), + }, "upload") + if err != nil { + return err + } + return d.commit(commitMessage, rootSha) +} + +var _ driver.Driver = (*Github)(nil) + +func (d *Github) getContentApiUrl(path string) string { + path = utils.FixAndCleanPath(path) + return fmt.Sprintf("https://api.github.com/repos/%s/%s/contents%s", d.Owner, d.Repo, path) +} + +func (d *Github) get(path string) (*Object, error) { + res, err := d.client.R().SetQueryParam("ref", d.Ref).Get(d.getContentApiUrl(path)) + if err != nil { + return nil, err + } + if res.StatusCode() != 200 { + return nil, toErr(res) + } + var resp Object + err = utils.Json.Unmarshal(res.Body(), &resp) + return &resp, err +} + +func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.UpdateProgress) (string, error) { + beforeContent := "{\"encoding\":\"base64\",\"content\":\"" + afterContent := "\"}" + length := int64(len(beforeContent)) + calculateBase64Length(s.GetSize()) + int64(len(afterContent)) + beforeContentReader := strings.NewReader(beforeContent) + contentReader, contentWriter := io.Pipe() + go func() { + encoder := base64.NewEncoder(base64.StdEncoding, contentWriter) + if _, err := utils.CopyWithBuffer(encoder, s); err != nil { + _ = contentWriter.CloseWithError(err) + return + } + _ = encoder.Close() + _ = contentWriter.Close() + }() + afterContentReader := strings.NewReader(afterContent) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, + fmt.Sprintf("https://api.github.com/repos/%s/%s/git/blobs", d.Owner, d.Repo), + driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: &driver.SimpleReaderWithSize{ + Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader), + Size: length, + }, + UpdateProgress: up, + })) + if err != nil { + return "", err + } + req.Header.Set("Accept", "application/vnd.github+json") + req.Header.Set("X-GitHub-Api-Version", "2022-11-28") + token := strings.TrimSpace(d.Token) + if token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } + req.ContentLength = length + + res, err := base.HttpClient.Do(req) + if err != nil { + return "", err + } + defer res.Body.Close() + resBody, err := io.ReadAll(res.Body) + if err != nil { + return "", err + } + if res.StatusCode != 201 { + var errMsg ErrResp + if err = utils.Json.Unmarshal(resBody, &errMsg); err != nil { + return "", errors.New(res.Status) + } else { + return "", fmt.Errorf("%s: %s", res.Status, errMsg.Message) + } + } + var resp PutBlobResp + if err = utils.Json.Unmarshal(resBody, &resp); err != nil { + return "", err + } + return resp.Sha, nil +} + +func (d *Github) renewParentTrees(path, prevSha, curSha, until string) (string, error) { + for path != until { + path = stdpath.Dir(path) + tree, sha, err := d.getTreeDirectly(path) + if err != nil { + return "", err + } + var newTree *TreeObjReq = nil + for _, t := range tree.Trees { + if t.Sha == prevSha { + newTree = &t.TreeObjReq + newTree.Sha = curSha + break + } + } + 
+
+func (d *Github) renewParentTrees(path, prevSha, curSha, until string) (string, error) {
+    for path != until {
+        path = stdpath.Dir(path)
+        tree, sha, err := d.getTreeDirectly(path)
+        if err != nil {
+            return "", err
+        }
+        var newTree *TreeObjReq = nil
+        for _, t := range tree.Trees {
+            if t.Sha == prevSha {
+                newTree = &t.TreeObjReq
+                newTree.Sha = curSha
+                break
+            }
+        }
+        if newTree == nil {
+            return "", errs.ObjectNotFound
+        }
+        curSha, err = d.newTree(sha, []interface{}{*newTree})
+        if err != nil {
+            return "", err
+        }
+        prevSha = sha
+    }
+    return curSha, nil
+}
+
+func (d *Github) getTree(sha string) (*TreeResp, error) {
+    res, err := d.client.R().Get(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/trees/%s", d.Owner, d.Repo, sha))
+    if err != nil {
+        return nil, err
+    }
+    if res.StatusCode() != 200 {
+        return nil, toErr(res)
+    }
+    var resp TreeResp
+    if err = utils.Json.Unmarshal(res.Body(), &resp); err != nil {
+        return nil, err
+    }
+    return &resp, nil
+}
+
+func (d *Github) getTreeDirectly(path string) (*TreeResp, string, error) {
+    p, err := d.get(path)
+    if err != nil {
+        return nil, "", err
+    }
+    if p.Entries == nil {
+        return nil, "", fmt.Errorf("%s is not a folder", path)
+    }
+    tree, err := d.getTree(p.Sha)
+    if err != nil {
+        return nil, "", err
+    }
+    if tree.Truncated {
+        return nil, "", fmt.Errorf("tree %s is truncated", path)
+    }
+    return tree, p.Sha, nil
+}
+
+func (d *Github) newTree(baseSha string, tree []interface{}) (string, error) {
+    body := &TreeReq{Trees: tree}
+    if baseSha != "" {
+        body.BaseTree = baseSha
+    }
+    res, err := d.client.R().SetBody(body).
+        Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/trees", d.Owner, d.Repo))
+    if err != nil {
+        return "", err
+    }
+    if res.StatusCode() != 201 {
+        return "", toErr(res)
+    }
+    var resp TreeResp
+    if err = utils.Json.Unmarshal(res.Body(), &resp); err != nil {
+        return "", err
+    }
+    return resp.Sha, nil
+}
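+
+// Illustrative write sequence (endpoints as used below): every mutation ends
+// with two calls against the Git data API, first creating the commit object
+// and then fast-forwarding the branch ref to it:
+//
+//	POST  /repos/{owner}/{repo}/git/commits             -> new commit SHA
+//	PATCH /repos/{owner}/{repo}/git/refs/heads/{branch}  {"sha": "<newCommitSha>"}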
+
+func (d *Github) commit(message, treeSha string) error {
+    oldCommit, err := d.getBranchHead()
+    if err != nil {
+        return err
+    }
+    body := map[string]interface{}{
+        "message": message,
+        "tree":    treeSha,
+        "parents": []string{oldCommit},
+    }
+    d.addCommitterAndAuthor(&body)
+    if d.pgpEntity != nil {
+        signature, e := signCommit(&body, d.pgpEntity)
+        if e != nil {
+            return e
+        }
+        body["signature"] = signature
+    }
+    res, err := d.client.R().SetBody(body).Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/commits", d.Owner, d.Repo))
+    if err != nil {
+        return err
+    }
+    if res.StatusCode() != 201 {
+        return toErr(res)
+    }
+    var resp CommitResp
+    if err = utils.Json.Unmarshal(res.Body(), &resp); err != nil {
+        return err
+    }
+
+    // update the branch head
+    res, err = d.client.R().
+        SetBody(&UpdateRefReq{
+            Sha:   resp.Sha,
+            Force: false,
+        }).
+        Patch(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/refs/heads/%s", d.Owner, d.Repo, d.Ref))
+    if err != nil {
+        return err
+    }
+    if res.StatusCode() != 200 {
+        return toErr(res)
+    }
+    return nil
+}
+
+func (d *Github) getBranchHead() (string, error) {
+    res, err := d.client.R().Get(fmt.Sprintf("https://api.github.com/repos/%s/%s/branches/%s", d.Owner, d.Repo, d.Ref))
+    if err != nil {
+        return "", err
+    }
+    if res.StatusCode() != 200 {
+        return "", toErr(res)
+    }
+    var resp BranchResp
+    if err = utils.Json.Unmarshal(res.Body(), &resp); err != nil {
+        return "", err
+    }
+    return resp.Commit.Sha, nil
+}
+
+func (d *Github) copyWithoutRenewTree(srcObj, dstDir model.Obj) (dstSha, newSha, srcParentSha string, srcParentTree *TreeResp, err error) {
+    dst, err := d.get(dstDir.GetPath())
+    if err != nil {
+        return "", "", "", nil, err
+    }
+    if dst.Entries == nil {
+        return "", "", "", nil, errs.NotFolder
+    }
+    dstSha = dst.Sha
+    srcParentPath := stdpath.Dir(srcObj.GetPath())
+    srcParentTree, srcParentSha, err = d.getTreeDirectly(srcParentPath)
+    if err != nil {
+        return "", "", "", nil, err
+    }
+    var src *TreeObjReq = nil
+    for _, t := range srcParentTree.Trees {
+        if t.Path == srcObj.GetName() {
+            if t.Type == "commit" {
+                return "", "", "", nil, errors.New("cannot copy a submodule")
+            }
+            src = &t.TreeObjReq
+            break
+        }
+    }
+    if src == nil {
+        return "", "", "", nil, errs.ObjectNotFound
+    }
+
+    newTree := make([]interface{}, 0, 2)
+    newTree = append(newTree, *src)
+    if len(dst.Entries) == 1 && dst.Entries[0].Name == ".gitkeep" {
+        newTree = append(newTree, TreeObjReq{
+            Path: ".gitkeep",
+            Mode: "100644",
+            Type: "blob",
+            Sha:  nil,
+        })
+    }
+    newSha, err = d.newTree(dstSha, newTree)
+    if err != nil {
+        return "", "", "", nil, err
+    }
+    return dstSha, newSha, srcParentSha, srcParentTree, nil
+}
+
+func (d *Github) getRepo() (*RepoResp, error) {
+    res, err := d.client.R().Get(fmt.Sprintf("https://api.github.com/repos/%s/%s", d.Owner, d.Repo))
+    if err != nil {
+        return nil, err
+    }
+    if res.StatusCode() != 200 {
+        return nil, toErr(res)
+    }
+    var resp RepoResp
+    if err = utils.Json.Unmarshal(res.Body(), &resp); err != nil {
+        return nil, err
+    }
+    return &resp, nil
+}
+
+func (d *Github) getAuthenticatedUser() (*UserResp, error) {
+    res, err := d.client.R().Get("https://api.github.com/user")
+    if err != nil {
+        return nil, err
+    }
+    if res.StatusCode() != 200 {
+        return nil, toErr(res)
+    }
+    resp := &UserResp{}
+    if err = utils.Json.Unmarshal(res.Body(), resp); err != nil {
+        return nil, err
+    }
+    return resp, nil
+}
+
+func (d *Github) addCommitterAndAuthor(m *map[string]interface{}) {
+    if d.CommitterName != "" {
+        committer := map[string]string{
+            "name":  d.CommitterName,
+            "email": d.CommitterEmail,
+        }
+        (*m)["committer"] = committer
+    }
+    if d.AuthorName != "" {
+        author := map[string]string{
+            "name":  d.AuthorName,
+            "email": d.AuthorEmail,
+        }
+        (*m)["author"] = author
+    }
+}
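+
+// Illustrative commit-body fragment (hypothetical identity): when a committer
+// is configured, the request gains
+//
+//	"committer": {"name": "Alice", "email": "alice@example.com"}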
default."` + GitHubProxy string `json:"gh_proxy" type:"string" help:"GitHub proxy, e.g. https://ghproxy.net/raw.githubusercontent.com or https://gh-proxy.com/raw.githubusercontent.com"` + GPGPrivateKey string `json:"gpg_private_key" type:"text"` + GPGKeyPassphrase string `json:"gpg_key_passphrase" type:"string"` + CommitterName string `json:"committer_name" type:"string"` + CommitterEmail string `json:"committer_email" type:"string"` + AuthorName string `json:"author_name" type:"string"` + AuthorEmail string `json:"author_email" type:"string"` + MkdirCommitMsg string `json:"mkdir_commit_message" type:"text" default:"{{.UserName}} mkdir {{.ObjPath}}"` + DeleteCommitMsg string `json:"delete_commit_message" type:"text" default:"{{.UserName}} remove {{.ObjPath}}"` + PutCommitMsg string `json:"put_commit_message" type:"text" default:"{{.UserName}} upload {{.ObjPath}}"` + RenameCommitMsg string `json:"rename_commit_message" type:"text" default:"{{.UserName}} rename {{.ObjPath}} to {{.TargetName}}"` + CopyCommitMsg string `json:"copy_commit_message" type:"text" default:"{{.UserName}} copy {{.ObjPath}} to {{.TargetPath}}"` + MoveCommitMsg string `json:"move_commit_message" type:"text" default:"{{.UserName}} move {{.ObjPath}} to {{.TargetPath}}"` +} + +var config = driver.Config{ + Name: "GitHub API", + LocalSort: true, + DefaultRoot: "/", +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &Github{} + }) +} diff --git a/drivers/github/types.go b/drivers/github/types.go new file mode 100644 index 00000000..b057385c --- /dev/null +++ b/drivers/github/types.go @@ -0,0 +1,107 @@ +package github + +import ( + "github.com/alist-org/alist/v3/internal/model" + "time" +) + +type Links struct { + Git string `json:"git"` + Html string `json:"html"` + Self string `json:"self"` +} + +type Object struct { + Type string `json:"type"` + Encoding string `json:"encoding" required:"false"` + Size int64 `json:"size"` + Name string `json:"name"` + Path string `json:"path"` + Content string `json:"Content" required:"false"` + Sha string `json:"sha"` + URL string `json:"url"` + GitURL string `json:"git_url"` + HtmlURL string `json:"html_url"` + DownloadURL string `json:"download_url"` + Entries []Object `json:"entries" required:"false"` + Links Links `json:"_links"` + SubmoduleGitURL string `json:"submodule_git_url" required:"false"` + Target string `json:"target" required:"false"` +} + +func (o *Object) toModelObj() *model.Object { + return &model.Object{ + Name: o.Name, + Size: o.Size, + Modified: time.Unix(0, 0), + IsFolder: o.Type == "dir", + } +} + +type PutBlobResp struct { + URL string `json:"url"` + Sha string `json:"sha"` +} + +type ErrResp struct { + Message string `json:"message"` + DocumentationURL string `json:"documentation_url"` + Status string `json:"status"` +} + +type TreeObjReq struct { + Path string `json:"path"` + Mode string `json:"mode"` + Type string `json:"type"` + Sha interface{} `json:"sha"` +} + +type TreeObjResp struct { + TreeObjReq + Size int64 `json:"size" required:"false"` + URL string `json:"url"` +} + +func (o *TreeObjResp) toModelObj() *model.Object { + return &model.Object{ + Name: o.Path, + Size: o.Size, + Modified: time.Unix(0, 0), + IsFolder: o.Type == "tree", + } +} + +type TreeResp struct { + Sha string `json:"sha"` + URL string `json:"url"` + Trees []TreeObjResp `json:"tree"` + Truncated bool `json:"truncated"` +} + +type TreeReq struct { + BaseTree interface{} `json:"base_tree,omitempty"` + Trees []interface{} `json:"tree"` +} + +type CommitResp struct { + 
Sha string `json:"sha"` +} + +type BranchResp struct { + Name string `json:"name"` + Commit CommitResp `json:"commit"` +} + +type UpdateRefReq struct { + Sha string `json:"sha"` + Force bool `json:"force"` +} + +type RepoResp struct { + DefaultBranch string `json:"default_branch"` +} + +type UserResp struct { + Name string `json:"name"` + Email string `json:"email"` +} diff --git a/drivers/github/util.go b/drivers/github/util.go new file mode 100644 index 00000000..7ddf8746 --- /dev/null +++ b/drivers/github/util.go @@ -0,0 +1,166 @@ +package github + +import ( + "bytes" + "context" + "errors" + "fmt" + "strings" + "text/template" + "time" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/go-resty/resty/v2" +) + +type MessageTemplateVars struct { + UserName string + ObjName string + ObjPath string + ParentName string + ParentPath string + TargetName string + TargetPath string +} + +func getMessage(tmpl *template.Template, vars *MessageTemplateVars, defaultOpStr string) (string, error) { + sb := strings.Builder{} + if err := tmpl.Execute(&sb, vars); err != nil { + return fmt.Sprintf("%s %s %s", vars.UserName, defaultOpStr, vars.ObjPath), err + } + return sb.String(), nil +} + +func calculateBase64Length(inputLength int64) int64 { + return 4 * ((inputLength + 2) / 3) +} + +func toErr(res *resty.Response) error { + var errMsg ErrResp + if err := utils.Json.Unmarshal(res.Body(), &errMsg); err != nil { + return errors.New(res.Status()) + } else { + return fmt.Errorf("%s: %s", res.Status(), errMsg.Message) + } +} + +// Example input: +// a = /aaa/bbb/ccc +// b = /aaa/b11/ddd/ccc +// +// Output: +// ancestor = /aaa +// aChildName = bbb +// bChildName = b11 +// aRest = bbb/ccc +// bRest = b11/ddd/ccc +func getPathCommonAncestor(a, b string) (ancestor, aChildName, bChildName, aRest, bRest string) { + a = utils.FixAndCleanPath(a) + b = utils.FixAndCleanPath(b) + idx := 1 + for idx < len(a) && idx < len(b) { + if a[idx] != b[idx] { + break + } + idx++ + } + aNextIdx := idx + for aNextIdx < len(a) { + if a[aNextIdx] == '/' { + break + } + aNextIdx++ + } + bNextIdx := idx + for bNextIdx < len(b) { + if b[bNextIdx] == '/' { + break + } + bNextIdx++ + } + for idx > 0 { + if a[idx] == '/' { + break + } + idx-- + } + ancestor = utils.FixAndCleanPath(a[:idx]) + aChildName = a[idx+1 : aNextIdx] + bChildName = b[idx+1 : bNextIdx] + aRest = a[idx+1:] + bRest = b[idx+1:] + return ancestor, aChildName, bChildName, aRest, bRest +} + +func getUsername(ctx context.Context) string { + user, ok := ctx.Value("user").(*model.User) + if !ok { + return "" + } + return user.Username +} + +func loadPrivateKey(key, passphrase string) (*openpgp.Entity, error) { + entityList, err := openpgp.ReadArmoredKeyRing(strings.NewReader(key)) + if err != nil { + return nil, err + } + if len(entityList) < 1 { + return nil, fmt.Errorf("no keys found in key ring") + } + entity := entityList[0] + + pass := []byte(passphrase) + if entity.PrivateKey != nil && entity.PrivateKey.Encrypted { + if err = entity.PrivateKey.Decrypt(pass); err != nil { + return nil, fmt.Errorf("password incorrect: %+v", err) + } + } + for _, subKey := range entity.Subkeys { + if subKey.PrivateKey != nil && subKey.PrivateKey.Encrypted { + if err = subKey.PrivateKey.Decrypt(pass); err != nil { + return nil, fmt.Errorf("password incorrect: %+v", err) + } + } + } + return entity, nil +} + +func signCommit(m 
*map[string]interface{}, entity *openpgp.Entity) (string, error) {
+	var commit strings.Builder
+	commit.WriteString(fmt.Sprintf("tree %s\n", (*m)["tree"].(string)))
+	parents := (*m)["parents"].([]string)
+	for _, p := range parents {
+		commit.WriteString(fmt.Sprintf("parent %s\n", p))
+	}
+	now := time.Now()
+	_, offset := now.Zone()
+	// include minutes so half-hour timezones such as +0530 are formatted correctly
+	offsetMin := (offset % 3600) / 60
+	if offsetMin < 0 {
+		offsetMin = -offsetMin
+	}
+	tz := fmt.Sprintf("%+03d%02d", offset/3600, offsetMin)
+	author := (*m)["author"].(map[string]string)
+	commit.WriteString(fmt.Sprintf("author %s <%s> %d %s\n", author["name"], author["email"], now.Unix(), tz))
+	author["date"] = now.Format(time.RFC3339)
+	committer := (*m)["committer"].(map[string]string)
+	commit.WriteString(fmt.Sprintf("committer %s <%s> %d %s\n", committer["name"], committer["email"], now.Unix(), tz))
+	committer["date"] = now.Format(time.RFC3339)
+	commit.WriteString(fmt.Sprintf("\n%s", (*m)["message"].(string)))
+	data := commit.String()
+
+	var sigBuffer bytes.Buffer
+	err := openpgp.DetachSign(&sigBuffer, entity, strings.NewReader(data), nil)
+	if err != nil {
+		return "", fmt.Errorf("signing failed: %v", err)
+	}
+	var armoredSig bytes.Buffer
+	armorWriter, err := armor.Encode(&armoredSig, "PGP SIGNATURE", nil)
+	if err != nil {
+		return "", err
+	}
+	if _, err = utils.CopyWithBuffer(armorWriter, &sigBuffer); err != nil {
+		return "", err
+	}
+	_ = armorWriter.Close()
+	return armoredSig.String(), nil
+}
diff --git a/drivers/github_releases/driver.go b/drivers/github_releases/driver.go
new file mode 100644
index 00000000..b35aa57a
--- /dev/null
+++ b/drivers/github_releases/driver.go
@@ -0,0 +1,167 @@
+package github_releases
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/errs"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+)
+
+type GithubReleases struct {
+	model.Storage
+	Addition
+
+	points []MountPoint
+}
+
+func (d *GithubReleases) Config() driver.Config {
+	return config
+}
+
+func (d *GithubReleases) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *GithubReleases) Init(ctx context.Context) error {
+	_, err := d.ParseRepos(d.Addition.RepoStructure)
+	return err
+}
+
+func (d *GithubReleases) Drop(ctx context.Context) error {
+	return nil
+}
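The repo_structure option (see meta.go below) describes one mount per line as [path:]org/repo. A small sketch of how ParseRepos (defined later in this patch, in drivers/github_releases/util.go) maps it to mount points; the repository names are illustrative:

    points, err := d.ParseRepos("alistGo/alist\ntools:alistGo/desktop")
    if err == nil {
        fmt.Println(points[0].Point, points[0].Repo) // "/" "alistGo/alist"
        fmt.Println(points[1].Point, points[1].Repo) // "/tools" "alistGo/desktop"
    }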
+func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	files := make([]File, 0)
+	path := fmt.Sprintf("/%s", strings.Trim(dir.GetPath(), "/"))
+
+	for i := range d.points {
+		point := &d.points[i]
+
+		if !d.Addition.ShowAllVersion { // latest release only
+			point.RequestRelease(d.GetRequest, args.Refresh)
+
+			if point.Point == path { // the requested path is the mount point itself
+				files = append(files, point.GetLatestRelease()...)
+				if d.Addition.ShowReadme {
+					files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
+				}
+			} else if strings.HasPrefix(point.Point, path) { // the requested path is an ancestor of the mount point
+				nextDir := GetNextDir(point.Point, path)
+				if nextDir == "" {
+					continue
+				}
+
+				hasSameDir := false
+				for index := range files {
+					if files[index].GetName() == nextDir {
+						hasSameDir = true
+						files[index].Size += point.GetLatestSize()
+						break
+					}
+				}
+				if !hasSameDir {
+					files = append(files, File{
+						Path:     path + "/" + nextDir,
+						FileName: nextDir,
+						Size:     point.GetLatestSize(),
+						UpdateAt: point.Release.PublishedAt,
+						CreateAt: point.Release.CreatedAt,
+						Type:     "dir",
+						Url:      "",
+					})
+				}
+			}
+		} else { // all versions
+			point.RequestReleases(d.GetRequest, args.Refresh)
+
+			if point.Point == path { // the requested path is the mount point itself
+				files = append(files, point.GetAllVersion()...)
+				if d.Addition.ShowReadme {
+					files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
+				}
+			} else if strings.HasPrefix(point.Point, path) { // the requested path is an ancestor of the mount point
+				nextDir := GetNextDir(point.Point, path)
+				if nextDir == "" {
+					continue
+				}
+
+				hasSameDir := false
+				for index := range files {
+					if files[index].GetName() == nextDir {
+						hasSameDir = true
+						files[index].Size += point.GetAllVersionSize()
+						break
+					}
+				}
+				if !hasSameDir {
+					files = append(files, File{
+						FileName: nextDir,
+						Path:     path + "/" + nextDir,
+						Size:     point.GetAllVersionSize(),
+						UpdateAt: (*point.Releases)[0].PublishedAt,
+						CreateAt: (*point.Releases)[0].CreatedAt,
+						Type:     "dir",
+						Url:      "",
+					})
+				}
+			} else if strings.HasPrefix(path, point.Point) { // the requested path is a tag directory inside the mount point
+				tagName := GetNextDir(path, point.Point)
+				if tagName == "" {
+					continue
+				}
+
+				files = append(files, point.GetReleaseByTagName(tagName)...)
+			}
+		}
+	}
+
+	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
+		return src, nil
+	})
+}
+
+func (d *GithubReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	url := file.GetID()
+	ghProxy := strings.TrimSpace(d.Addition.GitHubProxy)
+
+	if ghProxy != "" {
+		url = strings.Replace(url, "https://github.com", ghProxy, 1)
+	}
+
+	link := model.Link{
+		URL:    url,
+		Header: http.Header{},
+	}
+	return &link, nil
+}
+
+func (d *GithubReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
+	// TODO create folder, optional
+	return nil, errs.NotImplement
+}
+
+func (d *GithubReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+	// TODO move obj, optional
+	return nil, errs.NotImplement
+}
+
+func (d *GithubReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
+	// TODO rename obj, optional
+	return nil, errs.NotImplement
+}
+
+func (d *GithubReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+	// TODO copy obj, optional
+	return nil, errs.NotImplement
+}
+
+func (d *GithubReleases) Remove(ctx context.Context, obj model.Obj) error {
+	// TODO remove obj, optional
+	return errs.NotImplement
+}
diff --git a/drivers/github_releases/meta.go b/drivers/github_releases/meta.go
new file mode 100644
index 00000000..47b84d37
--- /dev/null
+++ b/drivers/github_releases/meta.go
@@ -0,0 +1,35 @@
+package github_releases
+
+import (
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/op"
+)
+
+type Addition struct {
+	driver.RootID
+	RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"alistGo/alist" help:"structure:[path:]org/repo"`
+	ShowReadme    bool   `json:"show_readme" type:"bool" default:"true" help:"show README and LICENSE files"`
+	Token
string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"` + ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"` + GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "` +} + +var config = driver.Config{ + Name: "GitHub Releases", + LocalSort: false, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: false, + NeedMs: false, + DefaultRoot: "", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &GithubReleases{} + }) +} diff --git a/drivers/github_releases/models.go b/drivers/github_releases/models.go new file mode 100644 index 00000000..a9a0e493 --- /dev/null +++ b/drivers/github_releases/models.go @@ -0,0 +1,86 @@ +package github_releases + +type Release struct { + Url string `json:"url"` + AssetsUrl string `json:"assets_url"` + UploadUrl string `json:"upload_url"` + HtmlUrl string `json:"html_url"` + Id int `json:"id"` + Author User `json:"author"` + NodeId string `json:"node_id"` + TagName string `json:"tag_name"` + TargetCommitish string `json:"target_commitish"` + Name string `json:"name"` + Draft bool `json:"draft"` + Prerelease bool `json:"prerelease"` + CreatedAt string `json:"created_at"` + PublishedAt string `json:"published_at"` + Assets []Asset `json:"assets"` + TarballUrl string `json:"tarball_url"` + ZipballUrl string `json:"zipball_url"` + Body string `json:"body"` + Reactions Reactions `json:"reactions"` +} + +type User struct { + Login string `json:"login"` + Id int `json:"id"` + NodeId string `json:"node_id"` + AvatarUrl string `json:"avatar_url"` + GravatarId string `json:"gravatar_id"` + Url string `json:"url"` + HtmlUrl string `json:"html_url"` + FollowersUrl string `json:"followers_url"` + FollowingUrl string `json:"following_url"` + GistsUrl string `json:"gists_url"` + StarredUrl string `json:"starred_url"` + SubscriptionsUrl string `json:"subscriptions_url"` + OrganizationsUrl string `json:"organizations_url"` + ReposUrl string `json:"repos_url"` + EventsUrl string `json:"events_url"` + ReceivedEventsUrl string `json:"received_events_url"` + Type string `json:"type"` + UserViewType string `json:"user_view_type"` + SiteAdmin bool `json:"site_admin"` +} + +type Asset struct { + Url string `json:"url"` + Id int `json:"id"` + NodeId string `json:"node_id"` + Name string `json:"name"` + Label string `json:"label"` + Uploader User `json:"uploader"` + ContentType string `json:"content_type"` + State string `json:"state"` + Size int64 `json:"size"` + DownloadCount int `json:"download_count"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + BrowserDownloadUrl string `json:"browser_download_url"` +} + +type Reactions struct { + Url string `json:"url"` + TotalCount int `json:"total_count"` + PlusOne int `json:"+1"` + MinusOne int `json:"-1"` + Laugh int `json:"laugh"` + Hooray int `json:"hooray"` + Confused int `json:"confused"` + Heart int `json:"heart"` + Rocket int `json:"rocket"` + Eyes int `json:"eyes"` +} + +type FileInfo struct { + Name string `json:"name"` + Path string `json:"path"` + Sha string `json:"sha"` + Size int64 `json:"size"` + Url string `json:"url"` + HtmlUrl string `json:"html_url"` + GitUrl string `json:"git_url"` + DownloadUrl string `json:"download_url"` + Type string `json:"type"` +} diff --git 
a/drivers/github_releases/types.go b/drivers/github_releases/types.go
new file mode 100644
index 00000000..b0a9ee61
--- /dev/null
+++ b/drivers/github_releases/types.go
@@ -0,0 +1,213 @@
+package github_releases
+
+import (
+	"encoding/json"
+	"strings"
+	"time"
+
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
+)
+
+type MountPoint struct {
+	Point     string      // mount path
+	Repo      string      // repository, "owner/repo"
+	Release   *Release    // cached latest release
+	Releases  *[]Release  // cached list of all releases
+	OtherFile *[]FileInfo // cached extra files from the repository root
+}
+
+// Fetch the latest release (cached unless refresh is set)
+func (m *MountPoint) RequestRelease(get func(url string) (*resty.Response, error), refresh bool) {
+	if m.Repo == "" {
+		return
+	}
+
+	if m.Release == nil || refresh {
+		resp, err := get("https://api.github.com/repos/" + m.Repo + "/releases/latest")
+		if err != nil {
+			return
+		}
+		m.Release = new(Release)
+		_ = json.Unmarshal(resp.Body(), m.Release)
+	}
+}
+
+// Fetch all releases (cached unless refresh is set)
+func (m *MountPoint) RequestReleases(get func(url string) (*resty.Response, error), refresh bool) {
+	if m.Repo == "" {
+		return
+	}
+
+	if m.Releases == nil || refresh {
+		resp, err := get("https://api.github.com/repos/" + m.Repo + "/releases")
+		if err != nil {
+			return
+		}
+		m.Releases = new([]Release)
+		_ = json.Unmarshal(resp.Body(), m.Releases)
+	}
+}
+
+// Assets of the cached latest release
+func (m *MountPoint) GetLatestRelease() []File {
+	files := make([]File, 0)
+	if m.Release == nil {
+		return files
+	}
+	for _, asset := range m.Release.Assets {
+		files = append(files, File{
+			Path:     m.Point + "/" + asset.Name,
+			FileName: asset.Name,
+			Size:     asset.Size,
+			Type:     "file",
+			UpdateAt: asset.UpdatedAt,
+			CreateAt: asset.CreatedAt,
+			Url:      asset.BrowserDownloadUrl,
+		})
+	}
+	return files
+}
+
+// Total asset size of the latest release
+func (m *MountPoint) GetLatestSize() int64 {
+	if m.Release == nil {
+		return 0
+	}
+	size := int64(0)
+	for _, asset := range m.Release.Assets {
+		size += asset.Size
+	}
+	return size
+}
+
+// One directory entry per release tag
+func (m *MountPoint) GetAllVersion() []File {
+	files := make([]File, 0)
+	if m.Releases == nil {
+		return files
+	}
+	for _, release := range *m.Releases {
+		files = append(files, File{
+			Path:     m.Point + "/" + release.TagName,
+			FileName: release.TagName,
+			Size:     m.GetSizeByTagName(release.TagName),
+			Type:     "dir",
+			UpdateAt: release.PublishedAt,
+			CreateAt: release.CreatedAt,
+			Url:      release.HtmlUrl,
+		})
+	}
+	return files
+}
+
+// Assets of the release with the given tag
+func (m *MountPoint) GetReleaseByTagName(tagName string) []File {
+	if m.Releases == nil {
+		return nil
+	}
+	for _, item := range *m.Releases {
+		if item.TagName == tagName {
+			files := make([]File, 0)
+			for _, asset := range item.Assets {
+				files = append(files, File{
+					Path:     m.Point + "/" + tagName + "/" + asset.Name,
+					FileName: asset.Name,
+					Size:     asset.Size,
+					Type:     "file",
+					UpdateAt: asset.UpdatedAt,
+					CreateAt: asset.CreatedAt,
+					Url:      asset.BrowserDownloadUrl,
+				})
+			}
+			return files
+		}
+	}
+	return nil
+}
+
+// Total asset size of the release with the given tag
+func (m *MountPoint) GetSizeByTagName(tagName string) int64 {
+	if m.Releases == nil {
+		return 0
+	}
+	for _, item := range *m.Releases {
+		if item.TagName == tagName {
+			size := int64(0)
+			for _, asset := range item.Assets {
+				size += asset.Size
+			}
+			return size
+		}
+	}
+	return 0
+}
+
+// Total asset size across all releases
+func (m *MountPoint) GetAllVersionSize() int64 {
+	if m.Releases == nil {
+		return 0
+	}
+	size := int64(0)
+	for _, release := range *m.Releases {
+		for _, asset := range release.Assets {
+			size += asset.Size
+		}
+	}
+	return size
+}
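MountPoint caches each API response until the caller passes refresh=true. A minimal usage sketch with a stub fetcher for the get parameter; the stub and the repository name are illustrative:

    get := func(url string) (*resty.Response, error) {
        return resty.New().R().Get(url) // bare client; the driver passes GetRequest instead
    }
    mp := MountPoint{Point: "/", Repo: "alistGo/alist"}
    mp.RequestRelease(get, false) // first call hits the API and fills mp.Release
    mp.RequestRelease(get, false) // cached: no request is made
    mp.RequestRelease(get, true)  // refresh forces a refetch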
+// Extra files (README, LICENSE, ...) from the repository root, cached like the releases
+func (m *MountPoint) GetOtherFile(get func(url string) (*resty.Response, error), refresh bool) []File {
+	files := make([]File, 0)
+	if m.OtherFile == nil || refresh {
+		resp, err := get("https://api.github.com/repos/" + m.Repo + "/contents")
+		if err != nil {
+			return files
+		}
+		m.OtherFile = new([]FileInfo)
+		_ = json.Unmarshal(resp.Body(), m.OtherFile)
+	}
+
+	defaultTime := "1970-01-01T00:00:00Z"
+	for _, file := range *m.OtherFile {
+		if strings.HasSuffix(file.Name, ".md") || strings.HasPrefix(file.Name, "LICENSE") {
+			files = append(files, File{
+				Path:     m.Point + "/" + file.Name,
+				FileName: file.Name,
+				Size:     file.Size,
+				Type:     "file",
+				UpdateAt: defaultTime,
+				CreateAt: defaultTime,
+				Url:      file.DownloadUrl,
+			})
+		}
+	}
+	return files
+}
+
+type File struct {
+	Path     string // full path within the mount
+	FileName string // file name
+	Size     int64  // size in bytes
+	Type     string // "file" or "dir"
+	UpdateAt string // update time, e.g. "2025-01-27T16:10:16Z"
+	CreateAt string // creation time
+	Url      string // download URL
+}
+
+func (f File) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}
+
+func (f File) GetPath() string {
+	return f.Path
+}
+
+func (f File) GetSize() int64 {
+	return f.Size
+}
+
+func (f File) GetName() string {
+	return f.FileName
+}
+
+func (f File) ModTime() time.Time {
+	t, _ := time.Parse(time.RFC3339, f.UpdateAt)
+	return t
+}
+
+func (f File) CreateTime() time.Time {
+	t, _ := time.Parse(time.RFC3339, f.CreateAt)
+	return t
+}
+
+func (f File) IsDir() bool {
+	return f.Type == "dir"
+}
+
+func (f File) GetID() string {
+	return f.Url
+}
diff --git a/drivers/github_releases/util.go b/drivers/github_releases/util.go
new file mode 100644
index 00000000..df846e8a
--- /dev/null
+++ b/drivers/github_releases/util.go
@@ -0,0 +1,85 @@
+package github_releases
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/go-resty/resty/v2"
+	log "github.com/sirupsen/logrus"
+)
+
+// Send an authenticated GET request to the GitHub API
+func (d *GithubReleases) GetRequest(url string) (*resty.Response, error) {
+	req := base.RestyClient.R()
+	req.SetHeader("Accept", "application/vnd.github+json")
+	req.SetHeader("X-GitHub-Api-Version", "2022-11-28")
+	if d.Addition.Token != "" {
+		req.SetHeader("Authorization", fmt.Sprintf("Bearer %s", d.Addition.Token))
+	}
+	res, err := req.Get(url)
+	if err != nil {
+		return nil, err
+	}
+	if res.StatusCode() != 200 {
+		log.Warn("failed to get request: ", res.StatusCode(), res.String())
+	}
+	return res, nil
+}
+
+// Parse the mount structure, one "[path:]org/repo" entry per line
+func (d *GithubReleases) ParseRepos(text string) ([]MountPoint, error) {
+	lines := strings.Split(text, "\n")
+	points := make([]MountPoint, 0)
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		parts := strings.Split(line, ":")
+		path, repo := "", ""
+		if len(parts) == 1 {
+			path = "/"
+			repo = parts[0]
+		} else if len(parts) == 2 {
+			path = fmt.Sprintf("/%s", strings.Trim(parts[0], "/"))
+			repo = parts[1]
+		} else {
+			return nil, fmt.Errorf("invalid format: %s", line)
+		}
+
+		points = append(points, MountPoint{
+			Point:    path,
+			Repo:     repo,
+			Release:  nil,
+			Releases: nil,
+		})
+	}
+	d.points = points
+	return points, nil
+}
+
+// Get the next path segment of wholePath directly below basePath
+func GetNextDir(wholePath string, basePath string) string {
+	basePath = fmt.Sprintf("%s/", strings.TrimRight(basePath, "/"))
+	if !strings.HasPrefix(wholePath, basePath) {
+		return ""
+	}
+	remainingPath := strings.TrimLeft(strings.TrimPrefix(wholePath, basePath), "/")
+	if remainingPath != "" {
+		parts := strings.Split(remainingPath, "/")
+		nextDir := parts[0]
+		if strings.HasPrefix(wholePath, strings.TrimRight(basePath, "/")+"/"+nextDir) {
+			return nextDir
+		}
+	}
+	return ""
+}
+
+// Report whether parentDir is an ancestor directory of targetDir
+func IsAncestorDir(parentDir string, targetDir string) bool {
+	absTargetDir, _ := filepath.Abs(targetDir)
+	absParentDir, _ :=
filepath.Abs(parentDir) + return strings.HasPrefix(absTargetDir, absParentDir) +} diff --git a/drivers/google_drive/driver.go b/drivers/google_drive/driver.go index dccdcea9..c8afb084 100644 --- a/drivers/google_drive/driver.go +++ b/drivers/google_drive/driver.go @@ -158,7 +158,8 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi putUrl := res.Header().Get("location") if stream.GetSize() < d.ChunkSize*1024*1024 { _, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) { - req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream) + req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)). + SetBody(driver.NewLimitedUploadStream(ctx, stream)) }, nil) } else { err = d.chunkUpload(ctx, stream, putUrl) diff --git a/drivers/google_drive/util.go b/drivers/google_drive/util.go index 0d380112..0fe54346 100644 --- a/drivers/google_drive/util.go +++ b/drivers/google_drive/util.go @@ -11,10 +11,10 @@ import ( "strconv" "time" - "github.com/alist-org/alist/v3/pkg/http_range" - "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" "github.com/golang-jwt/jwt/v4" @@ -126,8 +126,7 @@ func (d *GoogleDrive) refreshToken() error { } d.AccessToken = resp.AccessToken return nil - } - if gdsaFileErr != nil && os.IsExist(gdsaFileErr) { + } else if os.IsExist(gdsaFileErr) { return gdsaFileErr } url := "https://www.googleapis.com/oauth2/v4/token" @@ -229,6 +228,7 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer if err != nil { return err } + reader = driver.NewLimitedUploadStream(ctx, reader) _, err = d.request(url, http.MethodPut, func(req *resty.Request) { req.SetHeaders(map[string]string{ "Content-Length": strconv.FormatInt(chunkSize, 10), diff --git a/drivers/google_photo/driver.go b/drivers/google_photo/driver.go index b54132ef..e6f0abc6 100644 --- a/drivers/google_photo/driver.go +++ b/drivers/google_photo/driver.go @@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi } resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) { - req.SetBody(stream).SetContext(ctx) + req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx) }, nil, postHeaders) if err != nil { diff --git a/drivers/halalcloud/driver.go b/drivers/halalcloud/driver.go index 08bb3808..26832760 100644 --- a/drivers/halalcloud/driver.go +++ b/drivers/halalcloud/driver.go @@ -4,12 +4,17 @@ import ( "context" "crypto/sha1" "fmt" + "io" + "net/url" + "path" + "strconv" + "time" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/http_range" - "github.com/alist-org/alist/v3/pkg/utils" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" @@ -19,11 +24,6 @@ import ( pubUserFile "github.com/city404/v6-public-rpc-proto/go/v6/userfile" "github.com/rclone/rclone/lib/readers" "github.com/zzzhr1990/go-common-entity/userfile" - "io" - "net/url" - "path" - "strconv" - "time" ) type HalalCloud struct { @@ -251,7 +251,6 @@ func (d *HalalCloud) getLink(ctx context.Context, file model.Obj, args model.Lin size := result.FileSize 
chunks := getChunkSizes(result.Sizes) - var finalClosers utils.Closers resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { length := httpRange.Length if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size { @@ -269,7 +268,6 @@ func (d *HalalCloud) getLink(ctx context.Context, file model.Obj, args model.Lin sha: result.Sha1, shaTemp: sha1.New(), } - finalClosers.Add(oo) return readers.NewLimitedReadCloser(oo, length), nil } @@ -281,7 +279,7 @@ func (d *HalalCloud) getLink(ctx context.Context, file model.Obj, args model.Lin duration = time.Until(time.Now().Add(time.Hour)) } - resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers} + resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader} return &model.Link{ RangeReadCloser: resultRangeReadCloser, Expiration: &duration, @@ -394,10 +392,11 @@ func (d *HalalCloud) put(ctx context.Context, dstDir model.Obj, fileStream model if fileStream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { uploader.PartSize = fileStream.GetSize() / (s3manager.MaxUploadParts - 1) } + reader := driver.NewLimitedUploadStream(ctx, fileStream) _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{ Bucket: aws.String(result.Bucket), Key: aws.String(result.Key), - Body: io.TeeReader(fileStream, driver.NewProgress(fileStream.GetSize(), up)), + Body: io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up)), }) return nil, err diff --git a/drivers/ilanzou/driver.go b/drivers/ilanzou/driver.go index ab5ebe7e..044193d3 100644 --- a/drivers/ilanzou/driver.go +++ b/drivers/ilanzou/driver.go @@ -2,7 +2,6 @@ package template import ( "context" - "crypto/md5" "encoding/base64" "encoding/hex" "fmt" @@ -17,6 +16,7 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/foxxorcat/mopan-sdk-go" "github.com/go-resty/resty/v2" @@ -66,12 +66,13 @@ func (d *ILanZou) Drop(ctx context.Context) error { } func (d *ILanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + offset := 1 var res []ListItem for { var resp ListResp _, err := d.proved("/record/file/list", http.MethodGet, func(req *resty.Request) { params := []string{ - "offset=1", + "offset=" + strconv.Itoa(offset), "limit=60", "folderId=" + dir.GetID(), "type=0", @@ -83,7 +84,9 @@ func (d *ILanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs) return nil, err } res = append(res, resp.List...) 
- if resp.TotalPage <= resp.Offset { + if resp.Offset < resp.TotalPage { + offset++ + } else { break } } @@ -117,7 +120,7 @@ func (d *ILanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs) if err != nil { return nil, err } - ts, ts_str, err := getTimestamp(d.conf.secret) + ts, ts_str, _ := getTimestamp(d.conf.secret) params := []string{ "uuid=" + url.QueryEscape(d.UUID), @@ -146,11 +149,17 @@ func (d *ILanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs) u.RawQuery = strings.Join(params, "&") realURL := u.String() // get the url after redirect - res, err := base.NoRedirectClient.R().SetHeaders(map[string]string{ - //"Origin": d.conf.site, + req := base.NoRedirectClient.R() + + req.SetHeaders(map[string]string{ "Referer": d.conf.site + "/", "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0", - }).Get(realURL) + }) + if d.Addition.Ip != "" { + req.SetHeader("X-Forwarded-For", d.Addition.Ip) + } + + res, err := req.Get(realURL) if err != nil { return nil, err } @@ -263,30 +272,21 @@ func (d *ILanZou) Remove(ctx context.Context, obj model.Obj) error { const DefaultPartSize = 1024 * 1024 * 8 -func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { - h := md5.New() - // need to calculate md5 of the full content - tempFile, err := stream.CacheFullInTempFile() - if err != nil { - return nil, err +func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + etag := s.GetHash().GetHash(utils.MD5) + var err error + if len(etag) != utils.MD5.Width { + _, etag, err = stream.CacheFullInTempFileAndHash(s, utils.MD5) + if err != nil { + return nil, err + } } - defer func() { - _ = tempFile.Close() - }() - if _, err = utils.CopyWithBuffer(h, tempFile); err != nil { - return nil, err - } - _, err = tempFile.Seek(0, io.SeekStart) - if err != nil { - return nil, err - } - etag := hex.EncodeToString(h.Sum(nil)) // get upToken res, err := d.proved("/7n/getUpToken", http.MethodPost, func(req *resty.Request) { req.SetBody(base.Json{ "fileId": "", - "fileName": stream.GetName(), - "fileSize": stream.GetSize() / 1024, + "fileName": s.GetName(), + "fileSize": s.GetSize()/1024 + 1, "folderId": dstDir.GetID(), "md5": etag, "type": 1, @@ -298,13 +298,20 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt upToken := utils.Json.Get(res, "upToken").ToString() now := time.Now() key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli()) + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: &driver.SimpleReaderWithSize{ + Reader: s, + Size: s.GetSize(), + }, + UpdateProgress: up, + }) var token string - if stream.GetSize() <= DefaultPartSize { - res, err := d.upClient.R().SetMultipartFormData(map[string]string{ + if s.GetSize() <= DefaultPartSize { + res, err := d.upClient.R().SetContext(ctx).SetMultipartFormData(map[string]string{ "token": upToken, "key": key, - "fname": stream.GetName(), - }).SetMultipartField("file", stream.GetName(), stream.GetMimetype(), tempFile). + "fname": s.GetName(), + }).SetMultipartField("file", s.GetName(), s.GetMimetype(), reader). 
Post("https://upload.qiniup.com/") if err != nil { return nil, err @@ -318,10 +325,10 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt } uploadId := utils.Json.Get(res.Body(), "uploadId").ToString() parts := make([]Part, 0) - partNum := (stream.GetSize() + DefaultPartSize - 1) / DefaultPartSize + partNum := (s.GetSize() + DefaultPartSize - 1) / DefaultPartSize for i := 1; i <= int(partNum); i++ { u := fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s/%d", d.conf.bucket, keyBase64, uploadId, i) - res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(tempFile, DefaultPartSize)).Put(u) + res, err = d.upClient.R().SetContext(ctx).SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(reader, DefaultPartSize)).Put(u) if err != nil { return nil, err } @@ -332,7 +339,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt }) } res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(base.Json{ - "fnmae": stream.GetName(), + "fnmae": s.GetName(), "parts": parts, }).Post(fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s", d.conf.bucket, keyBase64, uploadId)) if err != nil { @@ -370,9 +377,9 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt ID: strconv.FormatInt(file.FileId, 10), //Path: , Name: file.FileName, - Size: stream.GetSize(), - Modified: stream.ModTime(), - Ctime: stream.CreateTime(), + Size: s.GetSize(), + Modified: s.ModTime(), + Ctime: s.CreateTime(), IsFolder: false, HashInfo: utils.NewHashInfo(utils.MD5, etag), }, nil diff --git a/drivers/ilanzou/meta.go b/drivers/ilanzou/meta.go index f15fc01a..7a4a00fb 100644 --- a/drivers/ilanzou/meta.go +++ b/drivers/ilanzou/meta.go @@ -9,6 +9,7 @@ type Addition struct { driver.RootID Username string `json:"username" type:"string" required:"true"` Password string `json:"password" type:"string" required:"true"` + Ip string `json:"ip" type:"string"` Token string UUID string diff --git a/drivers/ilanzou/util.go b/drivers/ilanzou/util.go index a57e2a4a..81773afb 100644 --- a/drivers/ilanzou/util.go +++ b/drivers/ilanzou/util.go @@ -69,11 +69,17 @@ func (d *ILanZou) request(pathname, method string, callback base.ReqCallback, pr req := base.RestyClient.R() req.SetHeaders(map[string]string{ - "Origin": d.conf.site, - "Referer": d.conf.site + "/", - "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0", + "Origin": d.conf.site, + "Referer": d.conf.site + "/", + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0", + "Accept-Encoding": "gzip, deflate, br, zstd", + "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,mt;q=0.5", }) + if d.Addition.Ip != "" { + req.SetHeader("X-Forwarded-For", d.Addition.Ip) + } + if callback != nil { callback(req) } diff --git a/drivers/ipfs_api/driver.go b/drivers/ipfs_api/driver.go index f6f81305..264cef28 100644 --- a/drivers/ipfs_api/driver.go +++ b/drivers/ipfs_api/driver.go @@ -4,13 +4,12 @@ import ( "context" "fmt" "net/url" - stdpath "path" - "path/filepath" - "strings" + "path" + + shell "github.com/ipfs/go-ipfs-api" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" - shell "github.com/ipfs/go-ipfs-api" ) type IPFS struct { @@ -43,82 +42,143 @@ func (d 
*IPFS) Drop(ctx context.Context) error {
 }
 
 func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
-	path := dir.GetPath()
-	if path[len(path):] != "/" {
-		path += "/"
+	var ipfsPath string
+	cid := dir.GetID()
+	if cid != "" {
+		ipfsPath = path.Join("/ipfs", cid)
+	} else {
+		// IPNS DNS resolution can fail, so the CID may have to be looked up again here; the other modes should not fail at this point
+		ipfsPath = dir.GetPath()
+		switch d.Mode {
+		case "ipfs":
+			ipfsPath = path.Join("/ipfs", ipfsPath)
+		case "ipns":
+			ipfsPath = path.Join("/ipns", ipfsPath)
+		case "mfs":
+			fileStat, err := d.sh.FilesStat(ctx, ipfsPath)
+			if err != nil {
+				return nil, err
+			}
+			ipfsPath = path.Join("/ipfs", fileStat.Hash)
+		default:
+			return nil, fmt.Errorf("mode error")
+		}
 	}
-
-	path_cid, err := d.sh.FilesStat(ctx, path)
-	if err != nil {
-		return nil, err
-	}
-
-	dirs, err := d.sh.List(path_cid.Hash)
+	dirs, err := d.sh.List(ipfsPath)
 	if err != nil {
 		return nil, err
 	}
 
 	objlist := []model.Obj{}
 	for _, file := range dirs {
-		gateurl := *d.gateURL
-		gateurl.Path = "ipfs/" + file.Hash
-		gateurl.RawQuery = "filename=" + url.PathEscape(file.Name)
-		objlist = append(objlist, &model.ObjectURL{
-			Object: model.Object{ID: file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1},
-			Url:    model.Url{Url: gateurl.String()},
-		})
+		objlist = append(objlist, &model.Object{ID: file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1})
 	}
 
 	return objlist, nil
 }
 
 func (d *IPFS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
-	link := d.Gateway + "/ipfs/" + file.GetID() + "/?filename=" + url.PathEscape(file.GetName())
-	return &model.Link{URL: link}, nil
+	gateurl := d.gateURL.JoinPath("/ipfs/", file.GetID())
+	gateurl.RawQuery = "filename=" + url.QueryEscape(file.GetName())
+	return &model.Link{URL: gateurl.String()}, nil
 }
 
-func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
-	path := parentDir.GetPath()
-	if path[len(path):] != "/" {
-		path += "/"
+func (d *IPFS) Get(ctx context.Context, rawPath string) (model.Obj, error) {
+	rawPath = path.Join(d.GetRootPath(), rawPath)
+	var ipfsPath string
+	switch d.Mode {
+	case "ipfs":
+		ipfsPath = path.Join("/ipfs", rawPath)
+	case "ipns":
+		ipfsPath = path.Join("/ipns", rawPath)
+	case "mfs":
+		fileStat, err := d.sh.FilesStat(ctx, rawPath)
+		if err != nil {
+			return nil, err
+		}
+		ipfsPath = path.Join("/ipfs", fileStat.Hash)
+	default:
+		return nil, fmt.Errorf("mode error")
 	}
-	return d.sh.FilesMkdir(ctx, path+dirName)
+	file, err := d.sh.FilesStat(ctx, ipfsPath)
+	if err != nil {
+		return nil, err
+	}
+	return &model.Object{ID: file.Hash, Name: path.Base(rawPath), Path: rawPath, Size: int64(file.Size), IsFolder: file.Type == "directory"}, nil
 }
 
-func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
-	return d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath())
+func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
+	if d.Mode != "mfs" {
+		return nil, fmt.Errorf("only write in mfs mode")
+	}
+	dirPath := parentDir.GetPath()
+	err := d.sh.FilesMkdir(ctx, path.Join(dirPath, dirName), shell.FilesMkdir.Parents(true))
+	if err != nil {
+		return nil, err
+	}
+	file, err := d.sh.FilesStat(ctx, path.Join(dirPath, dirName))
+	if err != nil {
+		return nil, err
+	}
+	return &model.Object{ID: file.Hash, Name: dirName, Path: path.Join(dirPath, dirName), Size: int64(file.Size), IsFolder: true}, nil
 }
 
-func (d *IPFS) Rename(ctx context.Context, srcObj
model.Obj, newName string) error { - newFileName := filepath.Dir(srcObj.GetPath()) + "/" + newName - return d.sh.FilesMv(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/")) +func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + if d.Mode != "mfs" { + return nil, fmt.Errorf("only write in mfs mode") + } + dstPath := path.Join(dstDir.GetPath(), path.Base(srcObj.GetPath())) + d.sh.FilesRm(ctx, dstPath, true) + return &model.Object{ID: srcObj.GetID(), Name: srcObj.GetName(), Path: dstPath, Size: int64(srcObj.GetSize()), IsFolder: srcObj.IsDir()}, + d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath()) } -func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { - // TODO copy obj, optional - fmt.Println(srcObj.GetPath()) - fmt.Println(dstDir.GetPath()) - newFileName := dstDir.GetPath() + "/" + filepath.Base(srcObj.GetPath()) - fmt.Println(newFileName) - return d.sh.FilesCp(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/")) +func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + if d.Mode != "mfs" { + return nil, fmt.Errorf("only write in mfs mode") + } + dstPath := path.Join(path.Dir(srcObj.GetPath()), newName) + d.sh.FilesRm(ctx, dstPath, true) + return &model.Object{ID: srcObj.GetID(), Name: newName, Path: dstPath, Size: int64(srcObj.GetSize()), + IsFolder: srcObj.IsDir()}, d.sh.FilesMv(ctx, srcObj.GetPath(), dstPath) +} + +func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + if d.Mode != "mfs" { + return nil, fmt.Errorf("only write in mfs mode") + } + dstPath := path.Join(dstDir.GetPath(), path.Base(srcObj.GetPath())) + d.sh.FilesRm(ctx, dstPath, true) + return &model.Object{ID: srcObj.GetID(), Name: srcObj.GetName(), Path: dstPath, Size: int64(srcObj.GetSize()), IsFolder: srcObj.IsDir()}, + d.sh.FilesCp(ctx, path.Join("/ipfs/", srcObj.GetID()), dstPath, shell.FilesCp.Parents(true)) } func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error { - // TODO remove obj, optional + if d.Mode != "mfs" { + return fmt.Errorf("only write in mfs mode") + } return d.sh.FilesRm(ctx, obj.GetPath(), true) } -func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - // TODO upload file, optional - _, err := d.sh.Add(stream, ToFiles(stdpath.Join(dstDir.GetPath(), stream.GetName()))) - return err -} - -func ToFiles(dstDir string) shell.AddOpts { - return func(rb *shell.RequestBuilder) error { - rb.Option("to-files", dstDir) - return nil +func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + if d.Mode != "mfs" { + return nil, fmt.Errorf("only write in mfs mode") } + outHash, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + })) + if err != nil { + return nil, err + } + dstPath := path.Join(dstDir.GetPath(), s.GetName()) + if s.GetExist() != nil { + d.sh.FilesRm(ctx, dstPath, true) + } + err = d.sh.FilesCp(ctx, path.Join("/ipfs/", outHash), dstPath, shell.FilesCp.Parents(true)) + gateurl := d.gateURL.JoinPath("/ipfs/", outHash) + gateurl.RawQuery = "filename=" + url.QueryEscape(s.GetName()) + return &model.Object{ID: outHash, Name: s.GetName(), Path: dstPath, Size: int64(s.GetSize()), IsFolder: s.IsDir()}, err } //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { diff --git 
a/drivers/ipfs_api/meta.go b/drivers/ipfs_api/meta.go index cdc30424..3837bec2 100644 --- a/drivers/ipfs_api/meta.go +++ b/drivers/ipfs_api/meta.go @@ -8,14 +8,16 @@ import ( type Addition struct { // Usually one of two driver.RootPath - Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001"` - Gateway string `json:"gateway" default:"https://ipfs.io"` + Mode string `json:"mode" options:"ipfs,ipns,mfs" type:"select" required:"true"` + Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001" required:"true"` + Gateway string `json:"gateway" default:"http://127.0.0.1:8080" required:"true"` } var config = driver.Config{ Name: "IPFS API", DefaultRoot: "/", LocalSort: true, + OnlyProxy: false, } func init() { diff --git a/drivers/kodbox/driver.go b/drivers/kodbox/driver.go index eb5120a6..c536c916 100644 --- a/drivers/kodbox/driver.go +++ b/drivers/kodbox/driver.go @@ -3,8 +3,6 @@ package kodbox import ( "context" "fmt" - "github.com/alist-org/alist/v3/pkg/utils" - "github.com/go-resty/resty/v2" "net/http" "path/filepath" "strings" @@ -12,6 +10,8 @@ import ( "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/go-resty/resty/v2" ) type KodBox struct { @@ -225,14 +225,19 @@ func (d *KodBox) Remove(ctx context.Context, obj model.Obj) error { return nil } -func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { +func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { var resp *CommonResp _, err := d.request(http.MethodPost, "/?explorer/upload/fileUpload", func(req *resty.Request) { - req.SetFileReader("file", stream.GetName(), stream). + r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) + req.SetFileReader("file", s.GetName(), r). SetResult(&resp). SetFormData(map[string]string{ "path": dstDir.GetPath(), - }) + }). 
+			SetContext(ctx)
 	})
 	if err != nil {
 		return nil, err
@@ -244,8 +249,8 @@ func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	return &model.ObjThumb{
 		Object: model.Object{
 			Path:     resp.Info.(string),
-			Name:     stream.GetName(),
-			Size:     stream.GetSize(),
+			Name:     s.GetName(),
+			Size:     s.GetSize(),
 			IsFolder: false,
 			Modified: time.Now(),
 			Ctime:    time.Now(),
diff --git a/drivers/lanzou/driver.go b/drivers/lanzou/driver.go
index 9e73f052..877e72bb 100644
--- a/drivers/lanzou/driver.go
+++ b/drivers/lanzou/driver.go
@@ -208,18 +208,22 @@ func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
 	return errs.NotSupport
 }
 
-func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	if d.IsCookie() || d.IsAccount() {
 		var resp RespText[[]FileOrFolder]
 		_, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) {
+			reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+				Reader:         s,
+				UpdateProgress: up,
+			})
 			req.SetFormData(map[string]string{
 				"task":           "1",
 				"vie":            "2",
 				"ve":             "2",
 				"id":             "WU_FILE_0",
-				"name":           stream.GetName(),
+				"name":           s.GetName(),
 				"folder_id_bb_n": dstDir.GetID(),
-			}).SetFileReader("upload_file", stream.GetName(), stream).SetContext(ctx)
+			}).SetFileReader("upload_file", s.GetName(), reader).SetContext(ctx)
 		}, &resp, true)
 		if err != nil {
 			return nil, err
diff --git a/drivers/lanzou/help.go b/drivers/lanzou/help.go
index 31a558e9..c3f5c6bb 100644
--- a/drivers/lanzou/help.go
+++ b/drivers/lanzou/help.go
@@ -78,6 +78,42 @@ func RemoveNotes(html string) string {
 	})
 }
 
+// Strip JS comments (both // line comments and /* block */ comments)
+func RemoveJSComment(data string) string {
+	var result strings.Builder
+	inComment := false
+	inSingleLineComment := false
+
+	for i := 0; i < len(data); i++ {
+		v := data[i]
+
+		if inSingleLineComment && (v == '\n' || v == '\r') {
+			inSingleLineComment = false
+			result.WriteByte(v)
+			continue
+		}
+		if inComment {
+			if v == '*' && i+1 < len(data) && data[i+1] == '/' {
+				inComment = false
+				i++ // also skip the closing '/'
+			}
+			continue
+		}
+		if inSingleLineComment {
+			continue
+		}
+		if v == '/' && i+1 < len(data) {
+			nextChar := data[i+1]
+			if nextChar == '*' {
+				inComment = true
+				i++
+				continue
+			} else if nextChar == '/' {
+				inSingleLineComment = true
+				i++
+				continue
+			}
+		}
+		result.WriteByte(v)
+	}
+
+	return result.String()
+}
+
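A quick check of RemoveJSComment's behavior on input containing both comment styles (illustrative snippet, not taken from the lanzou pages):

    js := "var a = 1; // trailing comment\n/* block\ncomment */var b = 2;"
    fmt.Println(RemoveJSComment(js))
    // prints:
    // var a = 1; 
    // var b = 2;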
fmt.Errorf("not find file id") + } var resp FileShareInfoAndUrlResp[string] - _, err = d.post(d.ShareUrl+"/ajaxm.php", func(req *resty.Request) { req.SetFormData(param) }, &resp) + _, err = d.post(d.ShareUrl+"/ajaxm.php?file="+fileID, func(req *resty.Request) { req.SetFormData(param) }, &resp) if err != nil { return nil, err } @@ -381,8 +396,15 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) ( return nil, err } + fileIDs := findFileIDReg.FindStringSubmatch(nextPageData) + var fileID string + if len(fileIDs) > 1 { + fileID = fileIDs[1] + } else { + return nil, fmt.Errorf("not find file id") + } var resp FileShareInfoAndUrlResp[int] - _, err = d.post(d.ShareUrl+"/ajaxm.php", func(req *resty.Request) { req.SetFormData(param) }, &resp) + _, err = d.post(d.ShareUrl+"/ajaxm.php?file="+fileID, func(req *resty.Request) { req.SetFormData(param) }, &resp) if err != nil { return nil, err } diff --git a/drivers/lark/driver.go b/drivers/lark/driver.go index d2672300..fbf7529a 100644 --- a/drivers/lark/driver.go +++ b/drivers/lark/driver.go @@ -320,7 +320,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea Build() // 发起请求 - uploadLimit.Wait(ctx) + err := uploadLimit.Wait(ctx) + if err != nil { + return nil, err + } resp, err := c.client.Drive.File.UploadPrepare(ctx, req) if err != nil { return nil, err @@ -341,7 +344,7 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea length = stream.GetSize() - int64(i*blockSize) } - reader := io.LimitReader(stream, length) + reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, length)) req := larkdrive.NewUploadPartFileReqBuilder(). Body(larkdrive.NewUploadPartFileReqBodyBuilder(). @@ -353,7 +356,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea Build() // 发起请求 - uploadLimit.Wait(ctx) + err = uploadLimit.Wait(ctx) + if err != nil { + return nil, err + } resp, err := c.client.Drive.File.UploadPart(ctx, req) if err != nil { diff --git a/drivers/lenovonas_share/driver.go b/drivers/lenovonas_share/driver.go index 12e85143..684a2dda 100644 --- a/drivers/lenovonas_share/driver.go +++ b/drivers/lenovonas_share/driver.go @@ -3,6 +3,7 @@ package LenovoNasShare import ( "context" "net/http" + "time" "github.com/go-resty/resty/v2" @@ -15,7 +16,8 @@ import ( type LenovoNasShare struct { model.Storage Addition - stoken string + stoken string + expireAt int64 } func (d *LenovoNasShare) Config() driver.Config { @@ -27,20 +29,9 @@ func (d *LenovoNasShare) GetAddition() driver.Additional { } func (d *LenovoNasShare) Init(ctx context.Context) error { - if d.Host == "" { - d.Host = "https://siot-share.lenovo.com.cn" - } - query := map[string]string{ - "code": d.ShareId, - "password": d.SharePwd, - } - resp, err := d.request(d.Host+"/oneproxy/api/share/v1/access", http.MethodGet, func(req *resty.Request) { - req.SetQueryParams(query) - }, nil) - if err != nil { + if err := d.getStoken(); err != nil { return err } - d.stoken = utils.Json.Get(resp, "data", "stoken").ToString() return nil } @@ -49,6 +40,7 @@ func (d *LenovoNasShare) Drop(ctx context.Context) error { } func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + d.checkStoken() // 检查stoken是否过期 files := make([]File, 0) var resp Files @@ -71,7 +63,33 @@ func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.Lis }) } +func (d *LenovoNasShare) checkStoken() { // 检查stoken是否过期 + if d.expireAt < 
diff --git a/drivers/lenovonas_share/driver.go b/drivers/lenovonas_share/driver.go
index 12e85143..684a2dda 100644
--- a/drivers/lenovonas_share/driver.go
+++ b/drivers/lenovonas_share/driver.go
@@ -3,6 +3,7 @@ package LenovoNasShare
 import (
 	"context"
 	"net/http"
+	"time"
 
 	"github.com/go-resty/resty/v2"
 
@@ -15,7 +16,8 @@ import (
 type LenovoNasShare struct {
 	model.Storage
 	Addition
-	stoken string
+	stoken   string
+	expireAt int64
 }
 
 func (d *LenovoNasShare) Config() driver.Config {
@@ -27,20 +29,9 @@ func (d *LenovoNasShare) GetAddition() driver.Additional {
 }
 
 func (d *LenovoNasShare) Init(ctx context.Context) error {
-	if d.Host == "" {
-		d.Host = "https://siot-share.lenovo.com.cn"
-	}
-	query := map[string]string{
-		"code":     d.ShareId,
-		"password": d.SharePwd,
-	}
-	resp, err := d.request(d.Host+"/oneproxy/api/share/v1/access", http.MethodGet, func(req *resty.Request) {
-		req.SetQueryParams(query)
-	}, nil)
-	if err != nil {
+	if err := d.getStoken(); err != nil {
 		return err
 	}
-	d.stoken = utils.Json.Get(resp, "data", "stoken").ToString()
 	return nil
 }
 
@@ -49,6 +40,7 @@ func (d *LenovoNasShare) Drop(ctx context.Context) error {
 }
 
 func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	d.checkStoken() // refresh stoken if it has expired
 	files := make([]File, 0)
 
 	var resp Files
@@ -71,7 +63,33 @@ func (d *LenovoNasShare) List(ctx context.Context, dir model.Obj, args model.Lis
 	})
 }
 
+func (d *LenovoNasShare) checkStoken() { // refresh stoken if it has expired
+	if d.expireAt < time.Now().Unix() {
+		d.getStoken()
+	}
+}
+
+func (d *LenovoNasShare) getStoken() error { // fetch a fresh stoken
+	if d.Host == "" {
+		d.Host = "https://siot-share.lenovo.com.cn"
+	}
+	query := map[string]string{
+		"code":     d.ShareId,
+		"password": d.SharePwd,
+	}
+	resp, err := d.request(d.Host+"/oneproxy/api/share/v1/access", http.MethodGet, func(req *resty.Request) {
+		req.SetQueryParams(query)
+	}, nil)
+	if err != nil {
+		return err
+	}
+	d.stoken = utils.Json.Get(resp, "data", "stoken").ToString()
+	d.expireAt = utils.Json.Get(resp, "data", "expires_in").ToInt64() + time.Now().Unix() - 60
+	return nil
+}
+
 func (d *LenovoNasShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	d.checkStoken() // refresh stoken if it has expired
 	query := map[string]string{
 		"code":   d.ShareId,
 		"stoken": d.stoken,
diff --git a/drivers/lenovonas_share/types.go b/drivers/lenovonas_share/types.go
index 77b966d3..37ff1465 100644
--- a/drivers/lenovonas_share/types.go
+++ b/drivers/lenovonas_share/types.go
@@ -47,7 +47,11 @@ func (f File) GetPath() string {
 }
 
 func (f File) GetSize() int64 {
-	return f.Size
+	if f.IsDir() {
+		return 0
+	} else {
+		return f.Size
+	}
 }
 
 func (f File) GetName() string {
@@ -70,10 +74,6 @@ func (f File) GetID() string {
 	return f.GetPath()
 }
 
-func (f File) Thumb() string {
-	return ""
-}
-
 type Files struct {
 	Data struct {
 		List []File `json:"list"`
diff --git a/drivers/local/driver.go b/drivers/local/driver.go
index bf993e5d..faa2b3bd 100644
--- a/drivers/local/driver.go
+++ b/drivers/local/driver.go
@@ -22,6 +22,7 @@ import (
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/server/common"
 	"github.com/alist-org/times"
+	cp "github.com/otiai10/copy"
 	log "github.com/sirupsen/logrus"
 	_ "golang.org/x/image/webp"
 )
@@ -34,6 +35,10 @@ type Local struct {
 	// zero means no limit
 	thumbConcurrency int
 	thumbTokenBucket TokenBucket
+
+	// video thumb position
+	videoThumbPos             float64
+	videoThumbPosIsPercentage bool
 }
 
 func (d *Local) Config() driver.Config {
@@ -76,7 +81,33 @@ func (d *Local) Init(ctx context.Context) error {
 	if d.thumbConcurrency == 0 {
 		d.thumbTokenBucket = NewNopTokenBucket()
 	} else {
-		d.thumbTokenBucket = NewStaticTokenBucket(d.thumbConcurrency)
+		d.thumbTokenBucket = NewStaticTokenBucketWithMigration(d.thumbTokenBucket, d.thumbConcurrency)
+	}
+	// Check the VideoThumbPos value
+	if d.VideoThumbPos == "" {
+		d.VideoThumbPos = "20%"
+	}
+	if strings.HasSuffix(d.VideoThumbPos, "%") {
+		percentage := strings.TrimSuffix(d.VideoThumbPos, "%")
+		val, err := strconv.ParseFloat(percentage, 64)
+		if err != nil {
+			return fmt.Errorf("invalid video_thumb_pos value: %s, err: %s", d.VideoThumbPos, err)
+		}
+		if val < 0 || val > 100 {
+			return fmt.Errorf("invalid video_thumb_pos value: %s, the percentage must be a number between 0 and 100", d.VideoThumbPos)
+		}
+		d.videoThumbPosIsPercentage = true
+		d.videoThumbPos = val / 100
+	} else {
+		val, err := strconv.ParseFloat(d.VideoThumbPos, 64)
+		if err != nil {
+			return fmt.Errorf("invalid video_thumb_pos value: %s, err: %s", d.VideoThumbPos, err)
+		}
+		if val < 0 {
+			return fmt.Errorf("invalid video_thumb_pos value: %s, the time must be a positive number", d.VideoThumbPos)
+		}
+		d.videoThumbPosIsPercentage = false
+		d.videoThumbPos = val
 	}
 	return nil
 }
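Init above normalizes video_thumb_pos into the pair (videoThumbPos, videoThumbPosIsPercentage). A hypothetical helper showing the rule that GetSnapshot applies later in this patch; the name thumbPos is illustrative, not part of the patch:

    // "20%" is stored as 0.20 with isPercentage=true; "7.5" as 7.5 with isPercentage=false
    func thumbPos(pos float64, isPercentage bool, durationSec float64) float64 {
        if isPercentage {
            return durationSec * pos
        }
        if pos > durationSec {
            return durationSec // clamp to the end, as GetSnapshot does
        }
        return pos
    }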
args.ReqPath, fullPath) files = append(files, file) } return files, nil } -func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) model.Obj { +func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string, fullPath string) model.Obj { thumb := "" if d.Thumbnail { typeName := utils.GetFileType(f.Name()) if typeName == conf.IMAGE || typeName == conf.VIDEO { - thumb = common.GetApiUrl(nil) + stdpath.Join("/d", reqPath, f.Name()) + thumb = common.GetApiUrl(common.GetHttpReq(ctx)) + stdpath.Join("/d", reqPath, f.Name()) thumb = utils.EncodePath(thumb, true) thumb += "?type=thumb&sign=" + sign.Sign(stdpath.Join(reqPath, f.Name())) } @@ -148,7 +179,7 @@ func (d *Local) GetMeta(ctx context.Context, path string) (model.Obj, error) { if err != nil { return nil, err } - file := d.FileInfoToObj(f, path, path) + file := d.FileInfoToObj(ctx, f, path, path) //h := "123123" //if s, ok := f.(model.SetHash); ok && file.GetHash() == ("","") { // s.SetHash(h,"SHA1") @@ -241,11 +272,22 @@ func (d *Local) Move(ctx context.Context, srcObj, dstDir model.Obj) error { if utils.IsSubPath(srcPath, dstPath) { return fmt.Errorf("the destination folder is a subfolder of the source folder") } - err := os.Rename(srcPath, dstPath) - if err != nil { + if err := os.Rename(srcPath, dstPath); err != nil && strings.Contains(err.Error(), "invalid cross-device link") { + // Handle cross-device file move in local driver + if err = d.Copy(ctx, srcObj, dstDir); err != nil { + return err + } else { + // Directly remove file without check recycle bin if successfully copied + if srcObj.IsDir() { + err = os.RemoveAll(srcObj.GetPath()) + } else { + err = os.Remove(srcObj.GetPath()) + } + return err + } + } else { return err } - return nil } func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) error { @@ -258,22 +300,18 @@ func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) er return nil } -func (d *Local) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { +func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error { srcPath := srcObj.GetPath() dstPath := filepath.Join(dstDir.GetPath(), srcObj.GetName()) if utils.IsSubPath(srcPath, dstPath) { return fmt.Errorf("the destination folder is a subfolder of the source folder") } - var err error - if srcObj.IsDir() { - err = utils.CopyDir(srcPath, dstPath) - } else { - err = utils.CopyFile(srcPath, dstPath) - } - if err != nil { - return err - } - return nil + // Copy using otiai10/copy to perform more secure & efficient copy + return cp.Copy(srcPath, dstPath, cp.Options{ + Sync: true, // Sync file to disk after copy, may have performance penalty in filesystem such as ZFS + PreserveTimes: true, + PreserveOwner: true, + }) } func (d *Local) Remove(ctx context.Context, obj model.Obj) error { diff --git a/drivers/local/meta.go b/drivers/local/meta.go index 5ffac920..14b0404f 100644 --- a/drivers/local/meta.go +++ b/drivers/local/meta.go @@ -10,6 +10,7 @@ type Addition struct { Thumbnail bool `json:"thumbnail" required:"true" help:"enable thumbnail"` ThumbCacheFolder string `json:"thumb_cache_folder"` ThumbConcurrency string `json:"thumb_concurrency" default:"16" required:"false" help:"Number of concurrent thumbnail generation goroutines. This controls how many thumbnails can be generated in parallel."` + VideoThumbPos string `json:"video_thumb_pos" default:"20%" required:"false" help:"The position of the video thumbnail. 
If the value is a number (integer or floating point), it represents the time in seconds. If the value ends with '%', it represents the percentage of the video duration."` ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"` MkdirPerm string `json:"mkdir_perm" default:"777"` RecycleBinPath string `json:"recycle_bin_path" default:"delete permanently" help:"path to recycle bin, delete permanently if empty or keep 'delete permanently'"` diff --git a/drivers/local/token_bucket.go b/drivers/local/token_bucket.go index 38fbe73f..23c6ebd6 100644 --- a/drivers/local/token_bucket.go +++ b/drivers/local/token_bucket.go @@ -23,6 +23,38 @@ func NewStaticTokenBucket(size int) StaticTokenBucket { return StaticTokenBucket{bucket: bucket} } +func NewStaticTokenBucketWithMigration(oldBucket TokenBucket, size int) StaticTokenBucket { + if oldBucket != nil { + oldStaticBucket, ok := oldBucket.(StaticTokenBucket) + if ok { + oldSize := cap(oldStaticBucket.bucket) + migrateSize := oldSize + if size < migrateSize { + migrateSize = size + } + + bucket := make(chan struct{}, size) + for range size - migrateSize { + bucket <- struct{}{} + } + + if migrateSize != 0 { + go func() { + for range migrateSize { + <-oldStaticBucket.bucket + bucket <- struct{}{} + } + close(oldStaticBucket.bucket) + }() + } + return StaticTokenBucket{bucket: bucket} + } + } + return NewStaticTokenBucket(size) +} + +// The Take channel may be closed when the local driver is modified. +// Don't call the Put method after the channel has been closed. func (b StaticTokenBucket) Take() <-chan struct{} { return b.bucket } @@ -35,8 +67,10 @@ func (b StaticTokenBucket) Do(ctx context.Context, f func() error) error { select { case <-ctx.Done(): return ctx.Err() - case <-b.bucket: - defer b.Put() + case _, ok := <-b.Take(): + if ok { + defer b.Put() + } } return f() } diff --git a/drivers/local/util.go b/drivers/local/util.go index b994c205..802f60cf 100644 --- a/drivers/local/util.go +++ b/drivers/local/util.go @@ -2,11 +2,13 @@ package local import ( "bytes" + "encoding/json" "fmt" "io/fs" "os" "path/filepath" "sort" + "strconv" "strings" "github.com/alist-org/alist/v3/internal/conf" @@ -34,10 +36,50 @@ func isSymlinkDir(f fs.FileInfo, path string) bool { return false } -func GetSnapshot(videoPath string, frameNum int) (imgData *bytes.Buffer, err error) { +// Get the snapshot of the video +func (d *Local) GetSnapshot(videoPath string) (imgData *bytes.Buffer, err error) { + // Run ffprobe to get the video duration + jsonOutput, err := ffmpeg.Probe(videoPath) + if err != nil { + return nil, err + } + // get format.duration from the json string + type probeFormat struct { + Duration string `json:"duration"` + } + type probeData struct { + Format probeFormat `json:"format"` + } + var probe probeData + err = json.Unmarshal([]byte(jsonOutput), &probe) + if err != nil { + return nil, err + } + totalDuration, err := strconv.ParseFloat(probe.Format.Duration, 64) + if err != nil { + return nil, err + } + + var ss string + if d.videoThumbPosIsPercentage { + ss = fmt.Sprintf("%f", totalDuration*d.videoThumbPos) + } else { + // If the value is greater than the total duration, use the total duration + if d.videoThumbPos > totalDuration { + ss = fmt.Sprintf("%f", totalDuration) + } else { + ss = fmt.Sprintf("%f", d.videoThumbPos) + } + } + + // Run ffmpeg to get the snapshot srcBuf := bytes.NewBuffer(nil) - stream := ffmpeg.Input(videoPath). - Filter("select", ffmpeg.Args{fmt.Sprintf("gte(n,%d)", frameNum)}).
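For context on the snapshot change here: the old pipeline decoded from frame 0 and used the select filter to wait for frame frameNum (10 in the old call sites), so the thumbnail cost grew with video length; the rewritten version seeks on the input side before decoding. A rough CLI equivalent of both variants, assuming ffmpeg-go renders the Filter and KwArgs options the usual way:

// old: decode every frame until the 10th, then keep one
//   ffmpeg -i in.mp4 -vf "select=gte(n\,10)" -vframes 1 -f image2 -vcodec mjpeg pipe:
// new: jump near the target timestamp first, then grab a single frame
//   ffmpeg -ss <pos-seconds> -noaccurate_seek -i in.mp4 -vframes 1 -f image2 -vcodec mjpeg pipe: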
+ // If the remaining time from the seek point to the end of the video is less + // than the duration of a single frame, ffmpeg cannot extract any frames + // within the specified range and will exit with an error. + // The "noaccurate_seek" option prevents this error and would also speed up + // the seek process. + stream := ffmpeg.Input(videoPath, ffmpeg.KwArgs{"ss": ss, "noaccurate_seek": ""}). Output("pipe:", ffmpeg.KwArgs{"vframes": 1, "format": "image2", "vcodec": "mjpeg"}). GlobalArgs("-loglevel", "error").Silent(true). WithOutput(srcBuf, os.Stdout) @@ -77,7 +119,7 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) { } var srcBuf *bytes.Buffer if utils.GetFileType(file.GetName()) == conf.VIDEO { - videoBuf, err := GetSnapshot(fullPath, 10) + videoBuf, err := d.GetSnapshot(fullPath) if err != nil { return nil, nil, err } diff --git a/drivers/mediatrack/driver.go b/drivers/mediatrack/driver.go index f0f1ded0..50ef9799 100644 --- a/drivers/mediatrack/driver.go +++ b/drivers/mediatrack/driver.go @@ -161,7 +161,7 @@ func (d *MediaTrack) Remove(ctx context.Context, obj model.Obj) error { return err } -func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { src := "assets/" + uuid.New().String() var resp UploadResp _, err := d.request("https://jayce.api.mediatrack.cn/v3/storage/tokens/asset", http.MethodGet, func(req *resty.Request) { @@ -180,7 +180,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil if err != nil { return err } - tempFile, err := stream.CacheFullInTempFile() + tempFile, err := file.CacheFullInTempFile() if err != nil { return err } @@ -188,13 +188,19 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil _ = tempFile.Close() }() uploader := s3manager.NewUploader(s) - if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { - uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1) + if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { + uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1) } input := &s3manager.UploadInput{ Bucket: &resp.Data.Bucket, Key: &resp.Data.Object, - Body: tempFile, + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: &driver.SimpleReaderWithSize{ + Reader: tempFile, + Size: file.GetSize(), + }, + UpdateProgress: up, + }), } _, err = uploader.UploadWithContext(ctx, input) if err != nil { @@ -213,12 +219,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil hash := hex.EncodeToString(h.Sum(nil)) data := base.Json{ "category": 0, - "description": stream.GetName(), + "description": file.GetName(), "hash": hash, - "mime": stream.GetMimetype(), - "size": stream.GetSize(), + "mime": file.GetMimetype(), + "size": file.GetSize(), "src": src, - "title": stream.GetName(), + "title": file.GetName(), "type": 0, } _, err = d.request(url, http.MethodPost, func(req *resty.Request) { diff --git a/drivers/mega/driver.go b/drivers/mega/driver.go index 162aeef3..dc7b2201 100644 --- a/drivers/mega/driver.go +++ b/drivers/mega/driver.go @@ -56,12 +56,21 @@ func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([] if err != nil { return nil, err } - res := make([]model.Obj, 0) + fn := make(map[string]model.Obj) for i := range 
nodes { n := nodes[i] - if n.GetType() == mega.FILE || n.GetType() == mega.FOLDER { - res = append(res, &MegaNode{n}) + if n.GetType() != mega.FILE && n.GetType() != mega.FOLDER { + continue } + if _, ok := fn[n.GetName()]; !ok { + fn[n.GetName()] = &MegaNode{n} + } else if sameNameObj := fn[n.GetName()]; (&MegaNode{n}).ModTime().After(sameNameObj.ModTime()) { + fn[n.GetName()] = &MegaNode{n} + } + } + res := make([]model.Obj, 0) + for _, v := range fn { + res = append(res, v) } return res, nil } @@ -84,7 +93,6 @@ func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (* //} size := file.GetSize() - var finalClosers utils.Closers resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { length := httpRange.Length if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size { @@ -103,11 +111,10 @@ func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (* d: down, skip: httpRange.Start, } - finalClosers.Add(oo) return readers.NewLimitedReadCloser(oo, length), nil } - resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers} + resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader} resultLink := &model.Link{ RangeReadCloser: resultRangeReadCloser, } @@ -158,6 +165,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea return err } + reader := driver.NewLimitedUploadStream(ctx, stream) for id := 0; id < u.Chunks(); id++ { if utils.IsCanceled(ctx) { return ctx.Err() @@ -167,7 +175,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea return err } chunk := make([]byte, chkSize) - n, err := io.ReadFull(stream, chunk) + n, err := io.ReadFull(reader, chunk) if err != nil && err != io.EOF { return err } diff --git a/drivers/misskey/driver.go b/drivers/misskey/driver.go new file mode 100644 index 00000000..b5c753f3 --- /dev/null +++ b/drivers/misskey/driver.go @@ -0,0 +1,74 @@ +package misskey + +import ( + "context" + "strings" + + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" +) + +type Misskey struct { + model.Storage + Addition +} + +func (d *Misskey) Config() driver.Config { + return config +} + +func (d *Misskey) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *Misskey) Init(ctx context.Context) error { + d.Endpoint = strings.TrimSuffix(d.Endpoint, "/") + if d.Endpoint == "" || d.AccessToken == "" { + return errs.EmptyToken + } else { + return nil + } +} + +func (d *Misskey) Drop(ctx context.Context) error { + return nil +} + +func (d *Misskey) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + return d.list(dir) +} + +func (d *Misskey) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + return d.link(file) +} + +func (d *Misskey) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + return d.makeDir(parentDir, dirName) +} + +func (d *Misskey) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + return d.move(srcObj, dstDir) +} + +func (d *Misskey) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + return d.rename(srcObj, newName) +} + +func (d *Misskey) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + return d.copy(srcObj, dstDir) +} + +func (d *Misskey) Remove(ctx 
context.Context, obj model.Obj) error { + return d.remove(obj) +} + +func (d *Misskey) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + return d.put(ctx, dstDir, stream, up) +} + +//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { +// return nil, errs.NotSupport +//} + +var _ driver.Driver = (*Misskey)(nil) diff --git a/drivers/misskey/meta.go b/drivers/misskey/meta.go new file mode 100644 index 00000000..b8a80c15 --- /dev/null +++ b/drivers/misskey/meta.go @@ -0,0 +1,35 @@ +package misskey + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + // Usually one of two + driver.RootPath + // define other + // Field string `json:"field" type:"select" required:"true" options:"a,b,c" default:"a"` + Endpoint string `json:"endpoint" required:"true" default:"https://misskey.io"` + AccessToken string `json:"access_token" required:"true"` +} + +var config = driver.Config{ + Name: "Misskey", + LocalSort: false, + OnlyLocal: false, + OnlyProxy: false, + NoCache: false, + NoUpload: false, + NeedMs: false, + DefaultRoot: "/", + CheckStatus: false, + Alert: "", + NoOverwriteUpload: false, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &Misskey{} + }) +} diff --git a/drivers/misskey/types.go b/drivers/misskey/types.go new file mode 100644 index 00000000..e9adc8d2 --- /dev/null +++ b/drivers/misskey/types.go @@ -0,0 +1,35 @@ +package misskey + +type Resp struct { + Code int + Raw []byte +} + +type Properties struct { + Width int `json:"width"` + Height int `json:"height"` +} + +type MFile struct { + ID string `json:"id"` + CreatedAt string `json:"createdAt"` + Name string `json:"name"` + Type string `json:"type"` + MD5 string `json:"md5"` + Size int64 `json:"size"` + IsSensitive bool `json:"isSensitive"` + Blurhash string `json:"blurhash"` + Properties Properties `json:"properties"` + URL string `json:"url"` + ThumbnailURL string `json:"thumbnailUrl"` + Comment *string `json:"comment"` + FolderID *string `json:"folderId"` + Folder MFolder `json:"folder"` +} + +type MFolder struct { + ID string `json:"id"` + CreatedAt string `json:"createdAt"` + Name string `json:"name"` + ParentID *string `json:"parentId"` +} diff --git a/drivers/misskey/util.go b/drivers/misskey/util.go new file mode 100644 index 00000000..f8baeafa --- /dev/null +++ b/drivers/misskey/util.go @@ -0,0 +1,256 @@ +package misskey + +import ( + "context" + "errors" + "io" + "time" + + "github.com/go-resty/resty/v2" + + "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" +) + +// Base layer methods + +func (d *Misskey) request(path, method string, callback base.ReqCallback, resp interface{}) error { + url := d.Endpoint + "/api/drive" + path + req := base.RestyClient.R() + + req.SetAuthToken(d.AccessToken).SetHeader("Content-Type", "application/json") + + if callback != nil { + callback(req) + } else { + req.SetBody("{}") + } + + req.SetResult(resp) + + // enable request tracing for debugging + req.EnableTrace() + + response, err := req.Execute(method, url) + if err != nil { + return err + } + if !response.IsSuccess() { + return errors.New(response.String()) + } + return nil +} + +func (d *Misskey) getThumb(ctx context.Context, obj model.Obj) (io.Reader, error) { + // TODO
return the thumb of obj, optional + return nil, errs.NotImplement +} + +func setBody(body interface{}) base.ReqCallback { + return func(req *resty.Request) { + req.SetBody(body) + } +} + +func handleFolderId(dir model.Obj) interface{} { + if dir.GetID() == "" { + return nil + } + return dir.GetID() +} + +// API layer methods + +func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) { + var files []MFile + var body map[string]string + if dir.GetPath() != "/" { + body = map[string]string{"folderId": dir.GetID()} + } else { + body = map[string]string{} + } + err := d.request("/files", "POST", setBody(body), &files) + if err != nil { + return []model.Obj{}, err + } + return utils.SliceConvert(files, func(src MFile) (model.Obj, error) { + return mFile2Object(src), nil + }) +} + +func (d *Misskey) getFolders(dir model.Obj) ([]model.Obj, error) { + var folders []MFolder + var body map[string]string + if dir.GetPath() != "/" { + body = map[string]string{"folderId": dir.GetID()} + } else { + body = map[string]string{} + } + err := d.request("/folders", "POST", setBody(body), &folders) + if err != nil { + return []model.Obj{}, err + } + return utils.SliceConvert(folders, func(src MFolder) (model.Obj, error) { + return mFolder2Object(src), nil + }) +} + +func (d *Misskey) list(dir model.Obj) ([]model.Obj, error) { + files, _ := d.getFiles(dir) + folders, _ := d.getFolders(dir) + return append(files, folders...), nil +} + +func (d *Misskey) link(file model.Obj) (*model.Link, error) { + var mFile MFile + err := d.request("/files/show", "POST", setBody(map[string]string{"fileId": file.GetID()}), &mFile) + if err != nil { + return nil, err + } + return &model.Link{ + URL: mFile.URL, + }, nil +} + +func (d *Misskey) makeDir(parentDir model.Obj, dirName string) (model.Obj, error) { + var folder MFolder + err := d.request("/folders/create", "POST", setBody(map[string]interface{}{"parentId": handleFolderId(parentDir), "name": dirName}), &folder) + if err != nil { + return nil, err + } + return mFolder2Object(folder), nil +} + +func (d *Misskey) move(srcObj, dstDir model.Obj) (model.Obj, error) { + if srcObj.IsDir() { + var folder MFolder + err := d.request("/folders/update", "POST", setBody(map[string]interface{}{"folderId": srcObj.GetID(), "parentId": handleFolderId(dstDir)}), &folder) + return mFolder2Object(folder), err + } else { + var file MFile + err := d.request("/files/update", "POST", setBody(map[string]interface{}{"fileId": srcObj.GetID(), "folderId": handleFolderId(dstDir)}), &file) + return mFile2Object(file), err + } +} + +func (d *Misskey) rename(srcObj model.Obj, newName string) (model.Obj, error) { + if srcObj.IsDir() { + var folder MFolder + err := d.request("/folders/update", "POST", setBody(map[string]string{"folderId": srcObj.GetID(), "name": newName}), &folder) + return mFolder2Object(folder), err + } else { + var file MFile + err := d.request("/files/update", "POST", setBody(map[string]string{"fileId": srcObj.GetID(), "name": newName}), &file) + return mFile2Object(file), err + } +} + +func (d *Misskey) copy(srcObj, dstDir model.Obj) (model.Obj, error) { + if srcObj.IsDir() { + folder, err := d.makeDir(dstDir, srcObj.GetName()) + if err != nil { + return nil, err + } + list, err := d.list(srcObj) + if err != nil { + return nil, err + } + for _, obj := range list { + _, err := d.copy(obj, folder) + if err != nil { + return nil, err + } + } + return folder, nil + } else { + var file MFile + url, err := d.link(srcObj) + if err != nil { + return nil, err + } + err = 
d.request("/files/upload-from-url", "POST", setBody(map[string]interface{}{"url": url.URL, "folderId": handleFolderId(dstDir)}), &file) + if err != nil { + return nil, err + } + return mFile2Object(file), nil + } +} + +func (d *Misskey) remove(obj model.Obj) error { + if obj.IsDir() { + err := d.request("/folders/delete", "POST", setBody(map[string]string{"folderId": obj.GetID()}), nil) + return err + } else { + err := d.request("/files/delete", "POST", setBody(map[string]string{"fileId": obj.GetID()}), nil) + return err + } +} + +func (d *Misskey) put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { + var file MFile + + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: stream, + UpdateProgress: up, + }) + req := base.RestyClient.R(). + SetContext(ctx). + SetFileReader("file", stream.GetName(), reader). + SetFormData(map[string]string{ + "folderId": handleFolderId(dstDir).(string), + "name": stream.GetName(), + "comment": "", + "isSensitive": "false", + "force": "false", + }). + SetResult(&file). + SetAuthToken(d.AccessToken) + + resp, err := req.Post(d.Endpoint + "/api/drive/files/create") + if err != nil { + return nil, err + } + if !resp.IsSuccess() { + return nil, errors.New(resp.String()) + } + + return mFile2Object(file), nil +} + +func mFile2Object(file MFile) *model.ObjThumbURL { + ctime, err := time.Parse(time.RFC3339, file.CreatedAt) + if err != nil { + ctime = time.Time{} + } + return &model.ObjThumbURL{ + Object: model.Object{ + ID: file.ID, + Name: file.Name, + Ctime: ctime, + IsFolder: false, + Size: file.Size, + }, + Thumbnail: model.Thumbnail{ + Thumbnail: file.ThumbnailURL, + }, + Url: model.Url{ + Url: file.URL, + }, + } +} + +func mFolder2Object(folder MFolder) *model.Object { + ctime, err := time.Parse(time.RFC3339, folder.CreatedAt) + if err != nil { + ctime = time.Time{} + } + return &model.Object{ + ID: folder.ID, + Name: folder.Name, + Ctime: ctime, + IsFolder: true, + } +} diff --git a/drivers/mopan/driver.go b/drivers/mopan/driver.go index 369ec83b..f8f14300 100644 --- a/drivers/mopan/driver.go +++ b/drivers/mopan/driver.go @@ -10,6 +10,8 @@ import ( "strings" "time" + "golang.org/x/sync/semaphore" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" @@ -267,9 +269,6 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre if err != nil { return nil, err } - defer func() { - _ = file.Close() - }() // step.1 uploadPartData, err := mopan.InitUploadPartData(ctx, mopan.UpdloadFileParam{ @@ -301,6 +300,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre retry.Attempts(3), retry.Delay(time.Second), retry.DelayType(retry.BackOffDelay)) + sem := semaphore.NewWeighted(3) // step.3 parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfos) @@ -319,7 +319,12 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre // step.4 threadG.Go(func(ctx context.Context) error { - req, err := part.NewRequest(ctx, io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize)) + if err = sem.Acquire(ctx, 1); err != nil { + return err + } + defer sem.Release(1) + reader := io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize) + req, err := part.NewRequest(ctx, driver.NewLimitedUploadStream(ctx, reader)) if err != nil { return err } 
@@ -328,7 +333,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre if err != nil { return err } - resp.Body.Close() + _ = resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("upload err,code=%d", resp.StatusCode) } diff --git a/drivers/netease_music/driver.go b/drivers/netease_music/driver.go index c0d103de..08460cce 100644 --- a/drivers/netease_music/driver.go +++ b/drivers/netease_music/driver.go @@ -88,7 +88,7 @@ func (d *NeteaseMusic) Remove(ctx context.Context, obj model.Obj) error { } func (d *NeteaseMusic) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - return d.putSongStream(stream) + return d.putSongStream(ctx, stream, up) } func (d *NeteaseMusic) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { diff --git a/drivers/netease_music/types.go b/drivers/netease_music/types.go index edbd40ee..93ecdf70 100644 --- a/drivers/netease_music/types.go +++ b/drivers/netease_music/types.go @@ -8,6 +8,7 @@ import ( "strings" "time" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/pkg/http_range" @@ -27,8 +28,8 @@ type SongResp struct { } type ListResp struct { - Size string `json:"size"` - MaxSize string `json:"maxSize"` + Size int64 `json:"size"` + MaxSize int64 `json:"maxSize"` Data []struct { AddTime int64 `json:"addTime"` FileName string `json:"fileName"` @@ -64,7 +65,6 @@ func (lrc *LyricObj) getLyricLink() *model.Link { sr := io.NewSectionReader(reader, httpRange.Start, httpRange.Length) return io.NopCloser(sr), nil }, - Closers: utils.EmptyClosers(), }, } } @@ -72,6 +72,8 @@ func (lrc *LyricObj) getLyricLink() *model.Link { type ReqOption struct { crypto string stream model.FileStreamer + up driver.UpdateProgress + ctx context.Context data map[string]string headers map[string]string cookies []*http.Cookie diff --git a/drivers/netease_music/upload.go b/drivers/netease_music/upload.go index ece496b3..3ff6216b 100644 --- a/drivers/netease_music/upload.go +++ b/drivers/netease_music/upload.go @@ -1,8 +1,10 @@ package netease_music import ( + "context" "crypto/md5" "encoding/hex" + "github.com/alist-org/alist/v3/internal/driver" "io" "net/http" "strconv" @@ -47,9 +49,12 @@ func (u *uploader) init(stream model.FileStreamer) error { } h := md5.New() - io.Copy(h, stream) + _, err := utils.CopyWithBuffer(h, stream) + if err != nil { + return err + } u.md5 = hex.EncodeToString(h.Sum(nil)) - _, err := u.file.Seek(0, io.SeekStart) + _, err = u.file.Seek(0, io.SeekStart) if err != nil { return err } @@ -167,7 +172,7 @@ func (u *uploader) publishInfo(resourceId string) error { return nil } -func (u *uploader) upload(stream model.FileStreamer) error { +func (u *uploader) upload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error { bucket := "jd-musicrep-privatecloud-audio-public" token, err := u.allocToken(bucket) if err != nil { @@ -192,6 +197,8 @@ func (u *uploader) upload(stream model.FileStreamer) error { http.MethodPost, ReqOption{ stream: stream, + up: up, + ctx: ctx, headers: map[string]string{ "x-nos-token": token.token, "Content-Type": "audio/mpeg", diff --git a/drivers/netease_music/util.go b/drivers/netease_music/util.go index 4d0696eb..21718106 100644 --- a/drivers/netease_music/util.go +++ b/drivers/netease_music/util.go @@ -1,7 +1,7 @@ package netease_music import ( - "io" + "context" "net/http" "path" "regexp" @@ 
-10,6 +10,7 @@ import ( "time" "github.com/alist-org/alist/v3/drivers/base" + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" @@ -58,20 +59,35 @@ func (d *NeteaseMusic) request(url, method string, opt ReqOption) ([]byte, error url = "https://music.163.com/api/linux/forward" } + if opt.ctx != nil { + req.SetContext(opt.ctx) + } if method == http.MethodPost { if opt.stream != nil { + if opt.up == nil { + opt.up = func(_ float64) {} + } req.SetContentLength(true) - req.SetBody(io.ReadCloser(opt.stream)) + req.SetBody(driver.NewLimitedUploadStream(opt.ctx, &driver.ReaderUpdatingProgress{ + Reader: opt.stream, + UpdateProgress: opt.up, + })) } else { req.SetFormData(data) } res, err := req.Post(url) - return res.Body(), err + if err != nil { + return nil, err + } + return res.Body(), nil } if method == http.MethodGet { res, err := req.Get(url) - return res.Body(), err + if err != nil { + return nil, err + } + return res.Body(), nil } return nil, errs.NotImplement @@ -206,12 +222,11 @@ func (d *NeteaseMusic) removeSongObj(file model.Obj) error { return err } -func (d *NeteaseMusic) putSongStream(stream model.FileStreamer) error { +func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error { tmp, err := stream.CacheFullInTempFile() if err != nil { return err } - defer tmp.Close() u := uploader{driver: d, file: tmp} @@ -231,7 +246,7 @@ func (d *NeteaseMusic) putSongStream(stream model.FileStreamer) error { } if u.meta.needUpload { - err = u.upload(stream) + err = u.upload(ctx, stream, up) if err != nil { return err } diff --git a/drivers/onedrive/util.go b/drivers/onedrive/util.go index 9ee2dae9..28ed5ccc 100644 --- a/drivers/onedrive/util.go +++ b/drivers/onedrive/util.go @@ -8,7 +8,7 @@ import ( "io" "net/http" stdpath "path" - "strconv" + "time" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" @@ -18,7 +18,6 @@ import ( "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" jsoniter "github.com/json-iterator/go" - log "github.com/sirupsen/logrus" ) var onedriveHostMap = map[string]Host{ @@ -127,7 +126,7 @@ func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, func (d *Onedrive) getFiles(path string) ([]File, error) { var res []File - nextLink := d.GetMetaUrl(false, path) + "/children?$top=5000&$expand=thumbnails($select=medium)&$select=id,name,size,fileSystemInfo,content.downloadUrl,file,parentReference" + nextLink := d.GetMetaUrl(false, path) + "/children?$top=1000&$expand=thumbnails($select=medium)&$select=id,name,size,fileSystemInfo,content.downloadUrl,file,parentReference" for nextLink != "" { var files Files _, err := d.Request(nextLink, http.MethodGet, nil, &files) @@ -152,12 +151,8 @@ func (d *Onedrive) upSmall(ctx context.Context, dstDir model.Obj, stream model.F // 1. 
upload new file // ApiDoc: https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content?view=odsp-graph-online url := d.GetMetaUrl(false, filepath) + "/content" - data, err := io.ReadAll(stream) - if err != nil { - return err - } - _, err = d.Request(url, http.MethodPut, func(req *resty.Request) { - req.SetBody(data).SetContext(ctx) + _, err := d.Request(url, http.MethodPut, func(req *resty.Request) { + req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx) }, nil) if err != nil { return fmt.Errorf("onedrive: Failed to upload new file(path=%v): %w", filepath, err) @@ -209,42 +204,54 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil uploadUrl := jsoniter.Get(res, "uploadUrl").ToString() var finish int64 = 0 DEFAULT := d.ChunkSize * 1024 * 1024 + retryCount := 0 + maxRetries := 3 for finish < stream.GetSize() { if utils.IsCanceled(ctx) { return ctx.Err() } - log.Debugf("upload: %d", finish) - var byteSize int64 = DEFAULT left := stream.GetSize() - finish - if left < DEFAULT { - byteSize = left - } + byteSize := min(left, DEFAULT) + utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()) byteData := make([]byte, byteSize) n, err := io.ReadFull(stream, byteData) - log.Debug(err, n) + utils.Log.Debug(err, n) if err != nil { return err } - req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData)) + req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) if err != nil { return err } req = req.WithContext(ctx) - req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) + req.ContentLength = byteSize + // req.Header.Set("Content-Length", strconv.Itoa(int(byteSize))) req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())) - finish += byteSize res, err := base.HttpClient.Do(req) if err != nil { return err } // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession - if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 { + switch { + case res.StatusCode >= 500 && res.StatusCode <= 504: + retryCount++ + if retryCount > maxRetries { + res.Body.Close() + return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) + } + backoff := time.Duration(1<<retryCount) * time.Second + time.Sleep(backoff) + case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200: data, _ := io.ReadAll(res.Body) res.Body.Close() return errors.New(string(data)) + default: + res.Body.Close() + retryCount = 0 + finish += byteSize + up(float64(finish) * 100 / float64(stream.GetSize())) } } return nil } diff --git a/drivers/onedrive_app/util.go b/drivers/onedrive_app/util.go --- a/drivers/onedrive_app/util.go +++ b/drivers/onedrive_app/util.go @@ ... @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 { + switch { + case res.StatusCode >= 500 && res.StatusCode <= 504: + retryCount++ + if retryCount > maxRetries { + res.Body.Close() + return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode) + } + backoff := time.Duration(1<<retryCount) * time.Second + time.Sleep(backoff) + case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200: data, _ := io.ReadAll(res.Body) res.Body.Close() return errors.New(string(data)) + default: + res.Body.Close() + retryCount = 0 + finish += byteSize + up(float64(finish) * 100 / float64(stream.GetSize())) } } return nil } diff --git a/drivers/quark_uc/driver.go b/drivers/quark_uc/driver.go --- a/drivers/quark_uc/driver.go +++ b/drivers/quark_uc/driver.go @@ ... @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - tempFile, err := stream.CacheFullInTempFile() - if err != nil { - return err - } - defer func() { - _ = tempFile.Close() - }() - m := md5.New() - _, err = utils.CopyWithBuffer(m, tempFile) - if err != nil { - return err - } - _, err = tempFile.Seek(0, io.SeekStart) - if err != nil { - return err - } + md5Str, sha1Str := stream.GetHash().GetHash(utils.MD5), stream.GetHash().GetHash(utils.SHA1) + var ( + md5 hash.Hash + sha1 hash.Hash + ) + writers := []io.Writer{} + if len(md5Str) != utils.MD5.Width { + md5 = utils.MD5.NewFunc() + writers = append(writers, md5) + } + if len(sha1Str) != utils.SHA1.Width { + sha1 = utils.SHA1.NewFunc() + writers = append(writers, sha1) + } + if len(writers) > 0 { + _, err := streamPkg.CacheFullInTempFileAndWriter(stream, io.MultiWriter(writers...)) + if err != nil { + return err + } + if md5 != nil { + md5Str = hex.EncodeToString(md5.Sum(nil)) + } + if sha1 != nil { + sha1Str = hex.EncodeToString(sha1.Sum(nil)) + } } - md5Str := hex.EncodeToString(m.Sum(nil)) - s := sha1.New() - _, err = utils.CopyWithBuffer(s, tempFile) - if err != nil { - return err - } - _, err = tempFile.Seek(0, io.SeekStart) - if err != nil { - return err - } - sha1Str := hex.EncodeToString(s.Sum(nil)) // pre pre, err := d.upPre(stream, dstDir.GetID()) if err != nil { @@ -177,29 +178,31 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre return nil } // part up - partSize := pre.Metadata.PartSize - var bytes []byte - md5s := make([]string, 0) - defaultBytes := make([]byte, partSize) total := stream.GetSize() left := total +
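The retry logic added to both OneDrive drivers backs off exponentially: retryCount 1, 2, 3 sleeps 2s, 4s, 8s (1<<retryCount seconds), and only the 500-504 family is retried while other non-2xx statuses still fail fast. The same policy as a standalone helper, a sketch rather than the drivers' actual code:

package main

import (
	"fmt"
	"time"
)

// retryOn5xx calls fn until it returns a non-5xx status or the retry
// budget is exhausted, sleeping 1<<attempt seconds between attempts,
// mirroring the 2s/4s/8s schedule above.
func retryOn5xx(maxRetries int, fn func() (int, error)) (int, error) {
	for attempt := 1; ; attempt++ {
		status, err := fn()
		if err != nil || status < 500 || status > 504 {
			return status, err
		}
		if attempt > maxRetries {
			return status, fmt.Errorf("upload failed after %d retries, last status %d", maxRetries, status)
		}
		time.Sleep(time.Duration(1<<attempt) * time.Second)
	}
}

func main() {
	attempts := 0
	status, err := retryOn5xx(3, func() (int, error) {
		attempts++
		if attempts < 3 {
			return 502, nil // simulate two transient gateway errors
		}
		return 201, nil
	})
	fmt.Println(status, err, "attempts:", attempts)
}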
partSize := int64(pre.Metadata.PartSize) + part := make([]byte, partSize) + count := int(total / partSize) + if total%partSize > 0 { + count++ + } + md5s := make([]string, 0, count) partNumber := 1 for left > 0 { if utils.IsCanceled(ctx) { return ctx.Err() } - if left > int64(partSize) { - bytes = defaultBytes - } else { - bytes = make([]byte, left) + if left < partSize { + part = part[:left] } - _, err := io.ReadFull(tempFile, bytes) + n, err := io.ReadFull(stream, part) if err != nil { return err } - left -= int64(len(bytes)) + left -= int64(n) log.Debugf("left: %d", left) - m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, bytes) + reader := driver.NewLimitedUploadStream(ctx, bytes.NewReader(part)) + m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, reader) //m, err := driver.UpPart(pre, file.GetMIMEType(), partNumber, bytes, account, md5Str, sha1Str) if err != nil { return err diff --git a/drivers/quark_uc/util.go b/drivers/quark_uc/util.go index df27af67..c5845cc6 100644 --- a/drivers/quark_uc/util.go +++ b/drivers/quark_uc/util.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "errors" "fmt" + "io" "net/http" "strconv" "strings" @@ -119,7 +120,7 @@ func (d *QuarkOrUC) upHash(md5, sha1, taskId string) (bool, error) { return resp.Data.Finish, err } -func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes []byte) (string, error) { +func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes io.Reader) (string, error) { //func (driver QuarkOrUC) UpPart(pre UpPreResp, mineType string, partNumber int, bytes []byte, account *model.Account, md5Str, sha1Str string) (string, error) { timeStr := time.Now().UTC().Format(http.TimeFormat) data := base.Json{ @@ -163,10 +164,13 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit "partNumber": strconv.Itoa(partNumber), "uploadId": pre.Data.UploadId, }).SetBody(bytes).Put(u) + if err != nil { + return "", err + } if res.StatusCode() != 200 { return "", fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String()) } - return res.Header().Get("ETag"), nil + return res.Header().Get("Etag"), nil } func (d *QuarkOrUC) upCommit(pre UpPreResp, md5s []string) error { @@ -230,6 +234,9 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit SetQueryParams(map[string]string{ "uploadId": pre.Data.UploadId, }).SetBody(body).Post(u) + if err != nil { + return err + } if res.StatusCode() != 200 { return fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String()) } diff --git a/drivers/quark_uc_tv/driver.go b/drivers/quark_uc_tv/driver.go index ff7ccf20..a857e2dd 100644 --- a/drivers/quark_uc_tv/driver.go +++ b/drivers/quark_uc_tv/driver.go @@ -125,7 +125,6 @@ func (d *QuarkUCTV) List(ctx context.Context, dir model.Obj, args model.ListArgs } func (d *QuarkUCTV) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { - files := &model.Link{} var fileLink FileLink _, err := d.request(ctx, "/file", "GET", func(req *resty.Request) { req.SetQueryParams(map[string]string{ @@ -139,8 +138,12 @@ func (d *QuarkUCTV) Link(ctx context.Context, file model.Obj, args model.LinkArg if err != nil { return nil, err } - files.URL = fileLink.Data.DownloadURL - return files, nil + + return &model.Link{ + URL: fileLink.Data.DownloadURL, + Concurrency: 3, + PartSize: 10 * utils.MB, + }, nil } func (d *QuarkUCTV) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) 
(model.Obj, error) { diff --git a/drivers/quqi/driver.go b/drivers/quqi/driver.go index 51e54981..36758bd1 100644 --- a/drivers/quqi/driver.go +++ b/drivers/quqi/driver.go @@ -3,6 +3,7 @@ package quqi import ( "bytes" "context" + "errors" "io" "strconv" "strings" @@ -315,7 +316,7 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea // if the file already exists in Quqi server, there is no need to actually upload it if uploadInitResp.Data.Exist { // the file name returned by Quqi does not include the extension name - nodeName, nodeExt := uploadInitResp.Data.NodeName, rawExt(stream.GetName()) + nodeName, nodeExt := uploadInitResp.Data.NodeName, utils.Ext(stream.GetName()) if nodeExt != "" { nodeName = nodeName + "." + nodeExt } @@ -385,20 +386,34 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea } uploader := s3manager.NewUploader(s) buf := make([]byte, 1024*1024*2) + fup := &driver.ReaderUpdatingProgress{ + Reader: &driver.SimpleReaderWithSize{ + Reader: f, + Size: int64(len(buf)), + }, + UpdateProgress: up, + } for partNumber := int64(1); ; partNumber++ { - n, err := io.ReadFull(f, buf) - if err != nil && err != io.ErrUnexpectedEOF { + n, err := io.ReadFull(fup, buf) + if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) { if err == io.EOF { break } return nil, err } + reader := bytes.NewReader(buf[:n]) _, err = uploader.S3.UploadPartWithContext(ctx, &s3.UploadPartInput{ UploadId: &uploadInitResp.Data.UploadID, Key: &uploadInitResp.Data.Key, Bucket: &uploadInitResp.Data.Bucket, PartNumber: aws.Int64(partNumber), - Body: bytes.NewReader(buf[:n]), + Body: struct { + *driver.RateLimitReader + io.Seeker + }{ + RateLimitReader: driver.NewLimitedUploadStream(ctx, reader), + Seeker: reader, + }, }) if err != nil { return nil, err @@ -417,7 +432,7 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea return nil, err } // the file name returned by Quqi does not include the extension name - nodeName, nodeExt := uploadFinishResp.Data.NodeName, rawExt(stream.GetName()) + nodeName, nodeExt := uploadFinishResp.Data.NodeName, utils.Ext(stream.GetName()) if nodeExt != "" { nodeName = nodeName + "." 
+ nodeExt } diff --git a/drivers/quqi/util.go b/drivers/quqi/util.go index c025f6ee..aa184d70 100644 --- a/drivers/quqi/util.go +++ b/drivers/quqi/util.go @@ -9,7 +9,6 @@ import ( "io" "net/http" "net/url" - stdpath "path" "strings" "time" @@ -115,16 +114,6 @@ func (d *Quqi) checkLogin() bool { return true } -// rawExt keeps the original case of the extension name -func rawExt(name string) string { - ext := stdpath.Ext(name) - if strings.HasPrefix(ext, ".") { - ext = ext[1:] - } - - return ext -} - // decryptKey gets the password func decryptKey(encodeKey string) []byte { // remove illegal characters @@ -300,16 +289,10 @@ func (d *Quqi) linkFromCDN(id string) (*model.Link, error) { bufferReader := bufio.NewReader(decryptReader) bufferReader.Discard(int(decryptedOffset)) - return utils.NewReadCloser(bufferReader, func() error { - return nil - }), nil + return io.NopCloser(bufferReader), nil } return &model.Link{ - Header: http.Header{ - "Origin": []string{"https://quqi.com"}, - "Cookie": []string{d.Cookie}, - }, RangeReadCloser: &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}, Expiration: &expiration, }, nil diff --git a/drivers/s3/driver.go b/drivers/s3/driver.go index 2b72d789..b7411489 100644 --- a/drivers/s3/driver.go +++ b/drivers/s3/driver.go @@ -4,18 +4,17 @@ import ( "bytes" "context" "fmt" - "github.com/alist-org/alist/v3/server/common" "io" "net/url" stdpath "path" "strings" "time" - "github.com/alist-org/alist/v3/internal/stream" - "github.com/alist-org/alist/v3/pkg/cron" - "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/pkg/cron" + "github.com/alist-org/alist/v3/server/common" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" @@ -99,8 +98,12 @@ func (d *S3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*mo var link model.Link var err error if d.CustomHost != "" { - err = req.Build() - link.URL = req.HTTPRequest.URL.String() + if d.EnableCustomHostPresign { + link.URL, err = req.Presign(time.Hour * time.Duration(d.SignURLExpire)) + } else { + err = req.Build() + link.URL = req.HTTPRequest.URL.String() + } if d.RemoveBucket { link.URL = strings.Replace(link.URL, "/"+d.Bucket, "", 1) } @@ -159,18 +162,21 @@ func (d *S3) Remove(ctx context.Context, obj model.Obj) error { return d.removeFile(obj.GetPath()) } -func (d *S3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { uploader := s3manager.NewUploader(d.Session) - if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { - uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1) + if s.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { + uploader.PartSize = s.GetSize() / (s3manager.MaxUploadParts - 1) } - key := getKey(stdpath.Join(dstDir.GetPath(), stream.GetName()), false) - contentType := stream.GetMimetype() + key := getKey(stdpath.Join(dstDir.GetPath(), s.GetName()), false) + contentType := s.GetMimetype() log.Debugln("key:", key) input := &s3manager.UploadInput{ - Bucket: &d.Bucket, - Key: &key, - Body: stream, + Bucket: &d.Bucket, + Key: &key, + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }), ContentType: &contentType, } _, err :=
uploader.UploadWithContext(ctx, input) diff --git a/drivers/s3/meta.go b/drivers/s3/meta.go index 4436c615..4de4b60a 100644 --- a/drivers/s3/meta.go +++ b/drivers/s3/meta.go @@ -14,6 +14,7 @@ type Addition struct { SecretAccessKey string `json:"secret_access_key" required:"true"` SessionToken string `json:"session_token"` CustomHost string `json:"custom_host"` + EnableCustomHostPresign bool `json:"enable_custom_host_presign"` SignURLExpire int `json:"sign_url_expire" type:"number" default:"4"` Placeholder string `json:"placeholder"` ForcePathStyle bool `json:"force_path_style"` diff --git a/drivers/s3/util.go b/drivers/s3/util.go index 31e658bd..e02945a0 100644 --- a/drivers/s3/util.go +++ b/drivers/s3/util.go @@ -4,6 +4,7 @@ import ( "context" "errors" "net/http" + "net/url" "path" "strings" @@ -198,7 +199,7 @@ func (d *S3) copyFile(ctx context.Context, src string, dst string) error { dstKey := getKey(dst, false) input := &s3.CopyObjectInput{ Bucket: &d.Bucket, - CopySource: aws.String("/" + d.Bucket + "/" + srcKey), + CopySource: aws.String(url.PathEscape(d.Bucket + "/" + srcKey)), Key: &dstKey, } _, err := d.client.CopyObject(input) diff --git a/drivers/seafile/driver.go b/drivers/seafile/driver.go index 6d1f16da..239f57dd 100644 --- a/drivers/seafile/driver.go +++ b/drivers/seafile/driver.go @@ -197,7 +197,7 @@ func (d *Seafile) Remove(ctx context.Context, obj model.Obj) error { return err } -func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { repo, path, err := d.getRepoAndPath(dstDir.GetPath()) if err != nil { return err @@ -214,11 +214,16 @@ func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt u := string(res) u = u[1 : len(u)-1] // remove quotes _, err = d.request(http.MethodPost, u, func(req *resty.Request) { - req.SetFileReader("file", stream.GetName(), stream). + r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) + req.SetFileReader("file", s.GetName(), r). SetFormData(map[string]string{ "parent_dir": path, "replace": "1", - }) + }). 
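On the CopySource fix in s3/util.go above: the header value is a URL path, so the bucket/key string has to be percent-encoded or copies of keys containing spaces, '#', and similar characters fail. A quick illustration of what url.PathEscape produces (with net/url imported; the file name is a made-up example):

fmt.Println(url.PathEscape("bucket/dir/file name#1.txt"))
// prints: bucket%2Fdir%2Ffile%20name%231.txt — '/', ' ' and '#' are all percent-encoded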
+ SetContext(ctx) }) return err } diff --git a/drivers/sftp/driver.go b/drivers/sftp/driver.go index 1f216598..7498ce39 100644 --- a/drivers/sftp/driver.go +++ b/drivers/sftp/driver.go @@ -111,7 +111,7 @@ func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea defer func() { _ = dstFile.Close() }() - err = utils.CopyWithCtx(ctx, dstFile, stream, stream.GetSize(), up) + err = utils.CopyWithCtx(ctx, dstFile, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up) return err } diff --git a/drivers/smb/driver.go b/drivers/smb/driver.go index 9632f24e..c292e92e 100644 --- a/drivers/smb/driver.go +++ b/drivers/smb/driver.go @@ -186,7 +186,7 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream _ = d.fs.Remove(fullPath) } }() - err = utils.CopyWithCtx(ctx, out, stream, stream.GetSize(), up) + err = utils.CopyWithCtx(ctx, out, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up) if err != nil { return err } diff --git a/drivers/teambition/driver.go b/drivers/teambition/driver.go index c75d2ac0..b37c324b 100644 --- a/drivers/teambition/driver.go +++ b/drivers/teambition/driver.go @@ -148,7 +148,7 @@ func (d *Teambition) Put(ctx context.Context, dstDir model.Obj, stream model.Fil var newFile *FileUpload if stream.GetSize() <= 20971520 { // post upload - newFile, err = d.upload(ctx, stream, token) + newFile, err = d.upload(ctx, stream, token, up) } else { // chunk upload //err = base.ErrNotImplement diff --git a/drivers/teambition/util.go b/drivers/teambition/util.go index 181cc58f..01c12cb1 100644 --- a/drivers/teambition/util.go +++ b/drivers/teambition/util.go @@ -1,6 +1,7 @@ package teambition import ( + "bytes" "context" "errors" "fmt" @@ -120,11 +121,15 @@ func (d *Teambition) getFiles(parentId string) ([]model.Obj, error) { return files, nil } -func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string) (*FileUpload, error) { +func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string, up driver.UpdateProgress) (*FileUpload, error) { prefix := "tcs" if d.isInternational() { prefix = "us-tcs" } + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: file, + UpdateProgress: up, + }) var newFile FileUpload res, err := base.RestyClient.R(). SetContext(ctx). @@ -134,7 +139,8 @@ func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token "type": file.GetMimetype(), "size": strconv.FormatInt(file.GetSize(), 10), "lastModifiedDate": time.Now().Format("Mon Jan 02 2006 15:04:05 GMT+0800 (中国标准时间)"), - }).SetMultipartField("file", file.GetName(), file.GetMimetype(), file). + }). + SetMultipartField("file", file.GetName(), file.GetMimetype(), reader). Post(fmt.Sprintf("https://%s.teambition.net/upload", prefix)) if err != nil { return nil, err @@ -183,10 +189,9 @@ func (d *Teambition) chunkUpload(ctx context.Context, file model.FileStreamer, t "Authorization": token, "Content-Type": "application/octet-stream", "Referer": referer, - }).SetBody(chunkData).Post(u) - if err != nil { - return nil, err - } + }). + SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(chunkData))). 
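Several hunks in this stretch (seafile, sftp, smb, teambition) wrap the upload body in driver.NewLimitedUploadStream plus driver.ReaderUpdatingProgress. Both are plain io.Reader decorators: they forward bytes unchanged while adding rate limiting or progress reporting on the side. A generic sketch of a progress-reporting reader (the types and names here are illustrative, not the driver package's own):

package main

import (
	"fmt"
	"io"
	"strings"
)

// progressReader decorates an io.Reader and reports cumulative progress
// as a percentage after every Read.
type progressReader struct {
	r      io.Reader
	total  int64
	read   int64
	update func(percent float64)
}

func (p *progressReader) Read(b []byte) (int, error) {
	n, err := p.r.Read(b)
	p.read += int64(n)
	if p.total > 0 {
		p.update(float64(p.read) * 100 / float64(p.total))
	}
	return n, err
}

func main() {
	src := strings.NewReader("example payload")
	pr := &progressReader{r: src, total: src.Size(), update: func(pct float64) {
		fmt.Printf("%.0f%%\n", pct)
	}}
	_, _ = io.Copy(io.Discard, pr) // any consumer sees the same bytes
}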
+ Post(u) if err != nil { return nil, err } @@ -252,7 +257,10 @@ func (d *Teambition) newUpload(ctx context.Context, dstDir model.Obj, stream mod Key: &uploadToken.Upload.Key, ContentDisposition: &uploadToken.Upload.ContentDisposition, ContentType: &uploadToken.Upload.ContentType, - Body: stream, + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: stream, + UpdateProgress: up, + }), } _, err = uploader.UploadWithContext(ctx, input) if err != nil { diff --git a/drivers/template/driver.go b/drivers/template/driver.go index 439f57f3..ff3648db 100644 --- a/drivers/template/driver.go +++ b/drivers/template/driver.go @@ -66,11 +66,33 @@ func (d *Template) Remove(ctx context.Context, obj model.Obj) error { return errs.NotImplement } -func (d *Template) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { +func (d *Template) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { // TODO upload file, optional return nil, errs.NotImplement } +func (d *Template) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { + // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Template) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { + // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Template) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) { + // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional + return nil, errs.NotImplement +} + +func (d *Template) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) { + // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional + // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir + // return errs.NotImplement to use an internal archive tool + return nil, errs.NotImplement +} + //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { // return nil, errs.NotSupport //} diff --git a/drivers/terabox/driver.go b/drivers/terabox/driver.go index c9662fce..82962b81 100644 --- a/drivers/terabox/driver.go +++ b/drivers/terabox/driver.go @@ -10,7 +10,6 @@ import ( "math" stdpath "path" "strconv" - "strings" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/pkg/utils" @@ -23,7 +22,9 @@ import ( type Terabox struct { model.Storage Addition - JsToken string + JsToken string + url_domain_prefix string + base_url string } func (d *Terabox) Config() driver.Config { @@ -36,6 +37,8 @@ func (d *Terabox) GetAddition() driver.Additional { func (d *Terabox) Init(ctx context.Context) error { var resp CheckLoginResp + d.base_url = "https://www.terabox.com" + d.url_domain_prefix = "jp" _, err := d.get("/api/check/login", nil, &resp) if err != nil { return err @@ -71,7 +74,16 @@ func (d *Terabox) Link(ctx context.Context, file model.Obj, args model.LinkArgs) } func (d *Terabox) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { - _, err := 
d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "") + params := map[string]string{ + "a": "commit", + } + data := map[string]string{ + "path": stdpath.Join(parentDir.GetPath(), dirName), + "isdir": "1", + "block_list": "[]", + } + res, err := d.post_form("/api/create", params, data, nil) + log.Debugln(string(res)) return err } @@ -117,63 +129,61 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error { } func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - tempFile, err := stream.CacheFullInTempFile() + resp, err := base.RestyClient.R(). + SetContext(ctx). + Get("https://" + d.url_domain_prefix + "-data.terabox.com/rest/2.0/pcs/file?method=locateupload") if err != nil { return err } - var Default int64 = 4 * 1024 * 1024 - defaultByteData := make([]byte, Default) - count := int(math.Ceil(float64(stream.GetSize()) / float64(Default))) - // cal md5 - h1 := md5.New() - h2 := md5.New() - block_list := make([]string, 0) - left := stream.GetSize() - for i := 0; i < count; i++ { - byteSize := Default - var byteData []byte - if left < Default { - byteSize = left - byteData = make([]byte, byteSize) - } else { - byteData = defaultByteData - } - left -= byteSize - _, err = io.ReadFull(tempFile, byteData) - if err != nil { - return err - } - h1.Write(byteData) - h2.Write(byteData) - block_list = append(block_list, fmt.Sprintf("\"%s\"", hex.EncodeToString(h2.Sum(nil)))) - h2.Reset() - } - - _, err = tempFile.Seek(0, io.SeekStart) + var locateupload_resp LocateUploadResp + err = utils.Json.Unmarshal(resp.Body(), &locateupload_resp) if err != nil { + log.Debugln(resp) return err } + log.Debugln(locateupload_resp) + // precreate file rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName()) path := encodeURIComponent(rawPath) - block_list_str := fmt.Sprintf("[%s]", strings.Join(block_list, ",")) - data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&block_list=%s", - path, stream.GetSize(), - block_list_str) - params := map[string]string{} + + var precreateBlockListStr string + if stream.GetSize() > initialChunkSize { + precreateBlockListStr = `["5910a591dd8fc18c32a8f3df4fdc1761","a5fc157d78e6ad1c7e114b056c92821e"]` + } else { + precreateBlockListStr = `["5910a591dd8fc18c32a8f3df4fdc1761"]` + } + + data := map[string]string{ + "path": rawPath, + "autoinit": "1", + "target_path": dstDir.GetPath(), + "block_list": precreateBlockListStr, + "local_mtime": strconv.FormatInt(stream.ModTime().Unix(), 10), + "file_limit_switch_v34": "true", + } var precreateResp PrecreateResp - _, err = d.post("/api/precreate", params, data, &precreateResp) + log.Debugln(data) + res, err := d.post_form("/api/precreate", nil, data, &precreateResp) if err != nil { return err } log.Debugf("%+v", precreateResp) if precreateResp.Errno != 0 { + log.Debugln(string(res)) return fmt.Errorf("[terabox] failed to precreate file, errno: %d", precreateResp.Errno) } if precreateResp.ReturnType == 2 { return nil } - params = map[string]string{ + + // upload chunks + tempFile, err := stream.CacheFullInTempFile() + if err != nil { + return err + } + + params := map[string]string{ "method": "upload", "path": path, "uploadid": precreateResp.Uploadid, @@ -182,42 +192,82 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt "channel": "dubox", "clienttype": "0", } - left = stream.GetSize() - for i, partseq := range precreateResp.BlockList { + + streamSize := stream.GetSize() + chunkSize := calculateChunkSize(streamSize) 
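calculateChunkSize (added in util.go below) starts from a 4MB chunk and a 4GB threshold and doubles both until the file fits under the threshold, which bounds the part count at roughly 1024 regardless of file size. A few worked values, sketching the arithmetic with Go shift notation for the byte sizes:

// calculateChunkSize(100 << 20) == 4 << 20   // 100MB file -> 4MB chunks, 25 parts
// calculateChunkSize(6 << 30)   == 8 << 20   // 6GB -> 8MB chunks (one doubling), 768 parts
// calculateChunkSize(10 << 30)  == 16 << 20  // 10GB -> 16MB chunks (two doublings), 640 parts
// calculateChunkSize(1 << 20)   == 1 << 20   // files under 4MB upload as a single chunk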
+ chunkByteData := make([]byte, chunkSize) + count := int(math.Ceil(float64(streamSize) / float64(chunkSize))) + left := streamSize + uploadBlockList := make([]string, 0, count) + h := md5.New() + for partseq := 0; partseq < count; partseq++ { if utils.IsCanceled(ctx) { return ctx.Err() } - byteSize := Default + byteSize := chunkSize var byteData []byte - if left < Default { + if left >= chunkSize { + byteData = chunkByteData + } else { byteSize = left byteData = make([]byte, byteSize) - } else { - byteData = defaultByteData } left -= byteSize _, err = io.ReadFull(tempFile, byteData) if err != nil { return err } - u := "https://c-jp.terabox.com/rest/2.0/pcs/superfile2" + + // calculate md5 + h.Write(byteData) + uploadBlockList = append(uploadBlockList, hex.EncodeToString(h.Sum(nil))) + h.Reset() + + u := "https://" + locateupload_resp.Host + "/rest/2.0/pcs/superfile2" params["partseq"] = strconv.Itoa(partseq) res, err := base.RestyClient.R(). SetContext(ctx). SetQueryParams(params). - SetFileReader("file", stream.GetName(), bytes.NewReader(byteData)). + SetFileReader("file", stream.GetName(), driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))). SetHeader("Cookie", d.Cookie). Post(u) if err != nil { return err } log.Debugln(res.String()) - if len(precreateResp.BlockList) > 0 { - up(float64(i) * 100 / float64(len(precreateResp.BlockList))) + if count > 0 { + up(float64(partseq) * 100 / float64(count)) } } - _, err = d.create(rawPath, stream.GetSize(), 0, precreateResp.Uploadid, block_list_str) - return err + + // create file + params = map[string]string{ + "isdir": "0", + "rtype": "1", + } + + uploadBlockListStr, err := utils.Json.MarshalToString(uploadBlockList) + if err != nil { + return err + } + data = map[string]string{ + "path": rawPath, + "size": strconv.FormatInt(stream.GetSize(), 10), + "uploadid": precreateResp.Uploadid, + "target_path": dstDir.GetPath(), + "block_list": uploadBlockListStr, + "local_mtime": strconv.FormatInt(stream.ModTime().Unix(), 10), + } + var createResp CreateResp + res, err = d.post_form("/api/create", params, data, &createResp) + log.Debugln(string(res)) + if err != nil { + return err + } + if createResp.Errno != 0 { + return fmt.Errorf("[terabox] failed to create file, errno: %d", createResp.Errno) + } + return nil } var _ driver.Driver = (*Terabox)(nil) diff --git a/drivers/terabox/types.go b/drivers/terabox/types.go index 890d5305..f4d50dde 100644 --- a/drivers/terabox/types.go +++ b/drivers/terabox/types.go @@ -95,3 +95,11 @@ type PrecreateResp struct { type CheckLoginResp struct { Errno int `json:"errno"` } + +type LocateUploadResp struct { + Host string `json:"host"` +} + +type CreateResp struct { + Errno int `json:"errno"` +} diff --git a/drivers/terabox/util.go b/drivers/terabox/util.go index 0a4e7879..058eecd6 100644 --- a/drivers/terabox/util.go +++ b/drivers/terabox/util.go @@ -14,6 +14,12 @@ import ( "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/utils" "github.com/go-resty/resty/v2" + log "github.com/sirupsen/logrus" +) + +const ( + initialChunkSize int64 = 4 << 20 // 4MB + initialSizeThreshold int64 = 4 << 30 // 4GB ) func getStrBetween(raw, start, end string) string { @@ -28,11 +34,11 @@ func getStrBetween(raw, start, end string) string { } func (d *Terabox) resetJsToken() error { - u := "https://www.terabox.com/main" + u := d.base_url res, err := base.RestyClient.R().SetHeaders(map[string]string{ "Cookie": d.Cookie, "Accept": "application/json, text/plain, */*", - "Referer": 
"https://www.terabox.com/", + "Referer": d.base_url, "User-Agent": base.UserAgent, "X-Requested-With": "XMLHttpRequest", }).Get(u) @@ -48,12 +54,12 @@ func (d *Terabox) resetJsToken() error { return nil } -func (d *Terabox) request(furl string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) { +func (d *Terabox) request(rurl string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) { req := base.RestyClient.R() req.SetHeaders(map[string]string{ "Cookie": d.Cookie, "Accept": "application/json, text/plain, */*", - "Referer": "https://www.terabox.com/", + "Referer": d.base_url, "User-Agent": base.UserAgent, "X-Requested-With": "XMLHttpRequest", }) @@ -70,7 +76,7 @@ func (d *Terabox) request(furl string, method string, callback base.ReqCallback, if resp != nil { req.SetResult(resp) } - res, err := req.Execute(method, furl) + res, err := req.Execute(method, d.base_url+rurl) if err != nil { return nil, err } @@ -82,14 +88,24 @@ func (d *Terabox) request(furl string, method string, callback base.ReqCallback, return nil, err } if !utils.IsBool(noRetry...) { - return d.request(furl, method, callback, resp, true) + return d.request(rurl, method, callback, resp, true) + } + } else if errno == -6 { + header := res.Header() + log.Debugln(header) + urlDomainPrefix := header.Get("Url-Domain-Prefix") + if len(urlDomainPrefix) > 0 { + d.url_domain_prefix = urlDomainPrefix + d.base_url = "https://" + d.url_domain_prefix + ".terabox.com" + log.Debugln("Redirect base_url to", d.base_url) + return d.request(rurl, method, callback, resp, noRetry...) } } return res.Body(), nil } func (d *Terabox) get(pathname string, params map[string]string, resp interface{}) ([]byte, error) { - return d.request("https://www.terabox.com"+pathname, http.MethodGet, func(req *resty.Request) { + return d.request(pathname, http.MethodGet, func(req *resty.Request) { if params != nil { req.SetQueryParams(params) } @@ -97,7 +113,7 @@ func (d *Terabox) get(pathname string, params map[string]string, resp interface{ } func (d *Terabox) post(pathname string, params map[string]string, data interface{}, resp interface{}) ([]byte, error) { - return d.request("https://www.terabox.com"+pathname, http.MethodPost, func(req *resty.Request) { + return d.request(pathname, http.MethodPost, func(req *resty.Request) { if params != nil { req.SetQueryParams(params) } @@ -105,6 +121,15 @@ func (d *Terabox) post(pathname string, params map[string]string, data interface }, resp) } +func (d *Terabox) post_form(pathname string, params map[string]string, data map[string]string, resp interface{}) ([]byte, error) { + return d.request(pathname, http.MethodPost, func(req *resty.Request) { + if params != nil { + req.SetQueryParams(params) + } + req.SetFormData(data) + }, resp) +} + func (d *Terabox) getFiles(dir string) ([]File, error) { page := 1 num := 100 @@ -237,17 +262,24 @@ func (d *Terabox) manage(opera string, filelist interface{}) ([]byte, error) { return d.post("/api/filemanager", params, data, nil) } -func (d *Terabox) create(path string, size int64, isdir int, uploadid, block_list string) ([]byte, error) { - params := map[string]string{} - data := fmt.Sprintf("path=%s&size=%d&isdir=%d", encodeURIComponent(path), size, isdir) - if uploadid != "" { - data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list) - } - return d.post("/api/create", params, data, nil) -} - func encodeURIComponent(str string) string { r := url.QueryEscape(str) r = 
strings.ReplaceAll(r, "+", "%20") return r } + +func calculateChunkSize(streamSize int64) int64 { + chunkSize := initialChunkSize + sizeThreshold := initialSizeThreshold + + if streamSize < chunkSize { + return streamSize + } + + for streamSize > sizeThreshold { + chunkSize <<= 1 + sizeThreshold <<= 1 + } + + return chunkSize +} diff --git a/drivers/thunder/driver.go b/drivers/thunder/driver.go index 9ba5dd82..1d2f2a81 100644 --- a/drivers/thunder/driver.go +++ b/drivers/thunder/driver.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "strconv" "strings" "github.com/alist-org/alist/v3/drivers/base" @@ -11,6 +12,7 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash" "github.com/aws/aws-sdk-go/aws" @@ -43,26 +45,29 @@ func (x *Thunder) Init(ctx context.Context) (err error) { Common: &Common{ client: base.NewRestyClient(), Algorithms: []string{ - "HPxr4BVygTQVtQkIMwQH33ywbgYG5l4JoR", - "GzhNkZ8pOBsCY+7", - "v+l0ImTpG7c7/", - "e5ztohgVXNP", - "t", - "EbXUWyVVqQbQX39Mbjn2geok3/0WEkAVxeqhtx857++kjJiRheP8l77gO", - "o7dvYgbRMOpHXxCs", - "6MW8TD8DphmakaxCqVrfv7NReRRN7ck3KLnXBculD58MvxjFRqT+", - "kmo0HxCKVfmxoZswLB4bVA/dwqbVAYghSb", - "j", - "4scKJNdd7F27Hv7tbt", + "9uJNVj/wLmdwKrJaVj/omlQ", + "Oz64Lp0GigmChHMf/6TNfxx7O9PyopcczMsnf", + "Eb+L7Ce+Ej48u", + "jKY0", + "ASr0zCl6v8W4aidjPK5KHd1Lq3t+vBFf41dqv5+fnOd", + "wQlozdg6r1qxh0eRmt3QgNXOvSZO6q/GXK", + "gmirk+ciAvIgA/cxUUCema47jr/YToixTT+Q6O", + "5IiCoM9B1/788ntB", + "P07JH0h6qoM6TSUAK2aL9T5s2QBVeY9JWvalf", + "+oK0AN", }, - DeviceID: utils.GetMD5EncodeStr(x.Username + x.Password), + DeviceID: func() string { + if len(x.DeviceID) != 32 { + return utils.GetMD5EncodeStr(x.DeviceID) + } + return x.DeviceID + }(), ClientID: "Xp6vsxz_7IYVw2BB", ClientSecret: "Xp6vsy4tN9toTVdMSpomVdXpRmES", - ClientVersion: "7.51.0.8196", + ClientVersion: "8.31.0.9726", PackageName: "com.xunlei.downloadprovider", - UserAgent: "ANDROID-com.xunlei.downloadprovider/7.51.0.8196 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/220200 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)", + UserAgent: "ANDROID-com.xunlei.downloadprovider/8.31.0.9726 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/512000 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)", DownloadUserAgent: "Dalvik/2.1.0 (Linux; U; Android 12; M2004J7AC Build/SP1A.210812.016)", - refreshCTokenCk: func(token string) { x.CaptchaToken = token op.MustSaveDriverStorage(x) @@ -78,6 +83,8 @@ func (x *Thunder) Init(ctx context.Context) (err error) { x.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error())) op.MustSaveDriverStorage(x) } + // 清空 信任密钥 + x.Addition.CreditKey = "" } x.SetTokenResp(token) return err @@ -91,6 +98,17 @@ func (x *Thunder) Init(ctx context.Context) (err error) { x.SetCaptchaToken(ctoekn) } + if x.Addition.CreditKey != "" { + x.SetCreditKey(x.Addition.CreditKey) + } + + if x.Addition.DeviceID != "" { + x.Common.DeviceID = x.Addition.DeviceID + } else { + x.Addition.DeviceID = x.Common.DeviceID + op.MustSaveDriverStorage(x) + } + // 防止重复登录 identity := x.GetIdentity() if x.identity != identity || !x.IsLogin() { @@ -100,6 +118,8 @@ func (x *Thunder) Init(ctx 
context.Context) (err error) { if err != nil { return err } + // 清空 信任密钥 + x.Addition.CreditKey = "" x.SetTokenResp(token) } return nil @@ -159,6 +179,17 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) { x.SetCaptchaToken(x.CaptchaToken) } + if x.ExpertAddition.CreditKey != "" { + x.SetCreditKey(x.ExpertAddition.CreditKey) + } + + if x.ExpertAddition.DeviceID != "" { + x.Common.DeviceID = x.ExpertAddition.DeviceID + } else { + x.ExpertAddition.DeviceID = x.Common.DeviceID + op.MustSaveDriverStorage(x) + } + // 签名方法 if x.SignType == "captcha_sign" { x.Common.Timestamp = x.Timestamp @@ -192,6 +223,8 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) { if err != nil { return err } + // 清空 信任密钥 + x.ExpertAddition.CreditKey = "" x.SetTokenResp(token) x.SetRefreshTokenFunc(func() error { token, err := x.XunLeiCommon.RefreshToken(x.TokenResp.RefreshToken) @@ -200,6 +233,8 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) { if err != nil { x.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error())) } + // 清空 信任密钥 + x.ExpertAddition.CreditKey = "" } x.SetTokenResp(token) op.MustSaveDriverStorage(x) @@ -231,7 +266,8 @@ func (x *ThunderExpert) SetTokenResp(token *TokenResp) { type XunLeiCommon struct { *Common - *TokenResp // 登录信息 + *TokenResp // 登录信息 + *CoreLoginResp // core登录信息 refreshTokenFunc func() error } @@ -331,29 +367,24 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error { return err } -func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - hi := stream.GetHash() - gcid := hi.GetHash(hash_extend.GCID) +func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + gcid := file.GetHash().GetHash(hash_extend.GCID) + var err error if len(gcid) < hash_extend.GCID.Width { - tFile, err := stream.CacheFullInTempFile() - if err != nil { - return err - } - - gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize()) + _, gcid, err = stream.CacheFullInTempFileAndHash(file, hash_extend.GCID, file.GetSize()) if err != nil { return err } } var resp UploadTaskResponse - _, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { + _, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { r.SetContext(ctx) r.SetBody(&base.Json{ "kind": FILE, "parent_id": dstDir.GetID(), - "name": stream.GetName(), - "size": stream.GetSize(), + "name": file.GetName(), + "size": file.GetSize(), "hash": gcid, "upload_type": UPLOAD_TYPE_RESUMABLE, }) @@ -374,14 +405,17 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model. 
return err } uploader := s3manager.NewUploader(s) - if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { - uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1) + if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { + uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1) } _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{ Bucket: aws.String(param.Bucket), Key: aws.String(param.Key), Expires: aws.Time(param.Expiration), - Body: stream, + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: file, + UpdateProgress: up, + }), }) return err } @@ -433,6 +467,10 @@ func (xc *XunLeiCommon) SetTokenResp(tr *TokenResp) { xc.TokenResp = tr } +func (xc *XunLeiCommon) SetCoreTokenResp(tr *CoreLoginResp) { + xc.CoreLoginResp = tr +} + // 携带Authorization和CaptchaToken的请求 func (xc *XunLeiCommon) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { data, err := xc.Common.Request(url, method, func(req *resty.Request) { @@ -461,7 +499,7 @@ func (xc *XunLeiCommon) Request(url string, method string, callback base.ReqCall } return nil, err case 9: // 验证码token过期 - if err = xc.RefreshCaptchaTokenAtLogin(GetAction(method, url), xc.UserID); err != nil { + if err = xc.RefreshCaptchaTokenAtLogin(GetAction(method, url), xc.TokenResp.UserID); err != nil { return nil, err } default: @@ -493,20 +531,25 @@ func (xc *XunLeiCommon) RefreshToken(refreshToken string) (*TokenResp, error) { // 登录 func (xc *XunLeiCommon) Login(username, password string) (*TokenResp, error) { - url := XLUSER_API_URL + "/auth/signin" - err := xc.RefreshCaptchaTokenInLogin(GetAction(http.MethodPost, url), username) + //v3 login拿到 sessionID + sessionID, err := xc.CoreLogin(username, password) if err != nil { return nil, err } + //v1 login拿到令牌 + url := XLUSER_API_URL + "/auth/signin/token" + if err = xc.RefreshCaptchaTokenInLogin(GetAction(http.MethodPost, url), username); err != nil { + return nil, err + } var resp TokenResp _, err = xc.Common.Request(url, http.MethodPost, func(req *resty.Request) { + req.SetPathParam("client_id", xc.ClientID) req.SetBody(&SignInRequest{ - CaptchaToken: xc.GetCaptchaToken(), ClientID: xc.ClientID, ClientSecret: xc.ClientSecret, - Username: username, - Password: password, + Provider: SignProvider, + SigninToken: sessionID, }) }, &resp) if err != nil { @@ -522,3 +565,108 @@ func (xc *XunLeiCommon) IsLogin() bool { _, err := xc.Request(XLUSER_API_URL+"/user/me", http.MethodGet, nil, nil) return err == nil } + +// 离线下载文件 +func (xc *XunLeiCommon) OfflineDownload(ctx context.Context, fileUrl string, parentDir model.Obj, fileName string) (*OfflineTask, error) { + var resp OfflineDownloadResp + _, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { + r.SetContext(ctx) + r.SetBody(&base.Json{ + "kind": FILE, + "name": fileName, + "parent_id": parentDir.GetID(), + "upload_type": UPLOAD_TYPE_URL, + "url": base.Json{ + "url": fileUrl, + }, + }) + }, &resp) + + if err != nil { + return nil, err + } + + return &resp.Task, err +} + +/* +获取离线下载任务列表 +*/ +func (xc *XunLeiCommon) OfflineList(ctx context.Context, nextPageToken string) ([]OfflineTask, error) { + res := make([]OfflineTask, 0) + + var resp OfflineListResp + _, err := xc.Request(TASK_API_URL, http.MethodGet, func(req *resty.Request) { + req.SetContext(ctx). 
+ SetQueryParams(map[string]string{ + "type": "offline", + "limit": "10000", + "page_token": nextPageToken, + }) + }, &resp) + + if err != nil { + return nil, fmt.Errorf("failed to get offline list: %w", err) + } + res = append(res, resp.Tasks...) + return res, nil +} + +func (xc *XunLeiCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string, deleteFiles bool) error { + _, err := xc.Request(TASK_API_URL, http.MethodDelete, func(req *resty.Request) { + req.SetContext(ctx). + SetQueryParams(map[string]string{ + "task_ids": strings.Join(taskIDs, ","), + "delete_files": strconv.FormatBool(deleteFiles), + }) + }, nil) + if err != nil { + return fmt.Errorf("failed to delete tasks %v: %w", taskIDs, err) + } + return nil +} + +func (xc *XunLeiCommon) CoreLogin(username string, password string) (sessionID string, err error) { + url := XLUSER_API_BASE_URL + "/xluser.core.login/v3/login" + var resp CoreLoginResp + res, err := xc.Common.Request(url, http.MethodPost, func(req *resty.Request) { + req.SetHeader("User-Agent", "android-ok-http-client/xl-acc-sdk/version-5.0.12.512000") + req.SetBody(&CoreLoginRequest{ + ProtocolVersion: "301", + SequenceNo: "1000012", + PlatformVersion: "10", + IsCompressed: "0", + Appid: APPID, + ClientVersion: "8.31.0.9726", + PeerID: "00000000000000000000000000000000", + AppName: "ANDROID-com.xunlei.downloadprovider", + SdkVersion: "512000", + Devicesign: generateDeviceSign(xc.DeviceID, xc.PackageName), + NetWorkType: "WIFI", + ProviderName: "NONE", + DeviceModel: "M2004J7AC", + DeviceName: "Xiaomi_M2004j7ac", + OSVersion: "12", + Creditkey: xc.GetCreditKey(), + Hl: "zh-CN", + UserName: username, + PassWord: password, + VerifyKey: "", + VerifyCode: "", + IsMd5Pwd: "0", + }) + }, nil) + if err != nil { + return "", err + } + + if err = utils.Json.Unmarshal(res, &resp); err != nil { + return "", err + } + + xc.SetCoreTokenResp(&resp) + + sessionID = resp.SessionID + + return sessionID, nil +} diff --git a/drivers/thunder/meta.go b/drivers/thunder/meta.go index 12b01cba..5e6e2513 100644 --- a/drivers/thunder/meta.go +++ b/drivers/thunder/meta.go @@ -23,23 +23,25 @@ type ExpertAddition struct { RefreshToken string `json:"refresh_token" required:"true" help:"login type is refresh_token,this is required"` // 签名方法1 - Algorithms string `json:"algorithms" required:"true" help:"sign type is algorithms,this is required" default:"HPxr4BVygTQVtQkIMwQH33ywbgYG5l4JoR,GzhNkZ8pOBsCY+7,v+l0ImTpG7c7/,e5ztohgVXNP,t,EbXUWyVVqQbQX39Mbjn2geok3/0WEkAVxeqhtx857++kjJiRheP8l77gO,o7dvYgbRMOpHXxCs,6MW8TD8DphmakaxCqVrfv7NReRRN7ck3KLnXBculD58MvxjFRqT+,kmo0HxCKVfmxoZswLB4bVA/dwqbVAYghSb,j,4scKJNdd7F27Hv7tbt"` + Algorithms string `json:"algorithms" required:"true" help:"sign type is algorithms,this is required" default:"9uJNVj/wLmdwKrJaVj/omlQ,Oz64Lp0GigmChHMf/6TNfxx7O9PyopcczMsnf,Eb+L7Ce+Ej48u,jKY0,ASr0zCl6v8W4aidjPK5KHd1Lq3t+vBFf41dqv5+fnOd,wQlozdg6r1qxh0eRmt3QgNXOvSZO6q/GXK,gmirk+ciAvIgA/cxUUCema47jr/YToixTT+Q6O,5IiCoM9B1/788ntB,P07JH0h6qoM6TSUAK2aL9T5s2QBVeY9JWvalf,+oK0AN"` // 签名方法2 CaptchaSign string `json:"captcha_sign" required:"true" help:"sign type is captcha_sign,this is required"` Timestamp string `json:"timestamp" required:"true" help:"sign type is captcha_sign,this is required"` // 验证码 CaptchaToken string `json:"captcha_token"` + // 信任密钥 + CreditKey string `json:"credit_key" help:"credit key,used for login"` // 必要且影响登录,由签名决定 - DeviceID string `json:"device_id" required:"true" default:"9aa5c268e7bcfc197a9ad88e2fb330e5"` + DeviceID string `json:"device_id" default:""` ClientID string 
`json:"client_id" required:"true" default:"Xp6vsxz_7IYVw2BB"` ClientSecret string `json:"client_secret" required:"true" default:"Xp6vsy4tN9toTVdMSpomVdXpRmES"` - ClientVersion string `json:"client_version" required:"true" default:"7.51.0.8196"` + ClientVersion string `json:"client_version" required:"true" default:"8.31.0.9726"` PackageName string `json:"package_name" required:"true" default:"com.xunlei.downloadprovider"` //不影响登录,影响下载速度 - UserAgent string `json:"user_agent" required:"true" default:"ANDROID-com.xunlei.downloadprovider/7.51.0.8196 netWorkType/4G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/220200 Oauth2Client/0.9 (Linux 4_14_186-perf-gdcf98eab238b) (JAVA 0)"` + UserAgent string `json:"user_agent" required:"true" default:"ANDROID-com.xunlei.downloadprovider/8.31.0.9726 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/512000 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)"` DownloadUserAgent string `json:"download_user_agent" required:"true" default:"Dalvik/2.1.0 (Linux; U; Android 12; M2004J7AC Build/SP1A.210812.016)"` //优先使用视频链接代替下载链接 @@ -74,6 +76,10 @@ type Addition struct { Username string `json:"username" required:"true"` Password string `json:"password" required:"true"` CaptchaToken string `json:"captcha_token"` + // 信任密钥 + CreditKey string `json:"credit_key" help:"credit key,used for login"` + // 登录设备ID + DeviceID string `json:"device_id" default:""` } // 登录特征,用于判断是否重新登录 diff --git a/drivers/thunder/types.go b/drivers/thunder/types.go index 7c223673..1fe8432c 100644 --- a/drivers/thunder/types.go +++ b/drivers/thunder/types.go @@ -18,6 +18,10 @@ type ErrResp struct { } func (e *ErrResp) IsError() bool { + if e.ErrorMsg == "success" { + return false + } + return e.ErrorCode != 0 || e.ErrorMsg != "" || e.ErrorDescription != "" } @@ -61,13 +65,79 @@ func (t *TokenResp) Token() string { } type SignInRequest struct { - CaptchaToken string `json:"captcha_token"` - ClientID string `json:"client_id"` ClientSecret string `json:"client_secret"` - Username string `json:"username"` - Password string `json:"password"` + Provider string `json:"provider"` + SigninToken string `json:"signin_token"` +} + +type CoreLoginRequest struct { + ProtocolVersion string `json:"protocolVersion"` + SequenceNo string `json:"sequenceNo"` + PlatformVersion string `json:"platformVersion"` + IsCompressed string `json:"isCompressed"` + Appid string `json:"appid"` + ClientVersion string `json:"clientVersion"` + PeerID string `json:"peerID"` + AppName string `json:"appName"` + SdkVersion string `json:"sdkVersion"` + Devicesign string `json:"devicesign"` + NetWorkType string `json:"netWorkType"` + ProviderName string `json:"providerName"` + DeviceModel string `json:"deviceModel"` + DeviceName string `json:"deviceName"` + OSVersion string `json:"OSVersion"` + Creditkey string `json:"creditkey"` + Hl string `json:"hl"` + UserName string `json:"userName"` + PassWord string `json:"passWord"` + VerifyKey string `json:"verifyKey"` + VerifyCode string `json:"verifyCode"` + IsMd5Pwd string `json:"isMd5Pwd"` +} + +type CoreLoginResp struct { + Account string `json:"account"` + Creditkey string `json:"creditkey"` + /* Error string `json:"error"` + ErrorCode string `json:"errorCode"` + ErrorDescription string `json:"error_description"`*/ + ExpiresIn int `json:"expires_in"` + IsCompressed string `json:"isCompressed"` + IsSetPassWord string 
`json:"isSetPassWord"` + KeepAliveMinPeriod string `json:"keepAliveMinPeriod"` + KeepAlivePeriod string `json:"keepAlivePeriod"` + LoginKey string `json:"loginKey"` + NickName string `json:"nickName"` + PlatformVersion string `json:"platformVersion"` + ProtocolVersion string `json:"protocolVersion"` + SecureKey string `json:"secureKey"` + SequenceNo string `json:"sequenceNo"` + SessionID string `json:"sessionID"` + Timestamp string `json:"timestamp"` + UserID string `json:"userID"` + UserName string `json:"userName"` + UserNewNo string `json:"userNewNo"` + Version string `json:"version"` + /* VipList []struct { + ExpireDate string `json:"expireDate"` + IsAutoDeduct string `json:"isAutoDeduct"` + IsVip string `json:"isVip"` + IsYear string `json:"isYear"` + PayID string `json:"payId"` + PayName string `json:"payName"` + Register string `json:"register"` + Vasid string `json:"vasid"` + VasType string `json:"vasType"` + VipDayGrow string `json:"vipDayGrow"` + VipGrow string `json:"vipGrow"` + VipLevel string `json:"vipLevel"` + Icon struct { + General string `json:"general"` + Small string `json:"small"` + } `json:"icon"` + } `json:"vipList"`*/ } /* @@ -204,3 +274,76 @@ type UploadTaskResponse struct { File Files `json:"file"` } + +// 添加离线下载响应 +type OfflineDownloadResp struct { + File *string `json:"file"` + Task OfflineTask `json:"task"` + UploadType string `json:"upload_type"` + URL struct { + Kind string `json:"kind"` + } `json:"url"` +} + +// 离线下载列表 +type OfflineListResp struct { + ExpiresIn int64 `json:"expires_in"` + NextPageToken string `json:"next_page_token"` + Tasks []OfflineTask `json:"tasks"` +} + +// offlineTask +type OfflineTask struct { + Callback string `json:"callback"` + CreatedTime string `json:"created_time"` + FileID string `json:"file_id"` + FileName string `json:"file_name"` + FileSize string `json:"file_size"` + IconLink string `json:"icon_link"` + ID string `json:"id"` + Kind string `json:"kind"` + Message string `json:"message"` + Name string `json:"name"` + Params Params `json:"params"` + Phase string `json:"phase"` // PHASE_TYPE_RUNNING, PHASE_TYPE_ERROR, PHASE_TYPE_COMPLETE, PHASE_TYPE_PENDING + Progress int64 `json:"progress"` + Space string `json:"space"` + StatusSize int64 `json:"status_size"` + Statuses []string `json:"statuses"` + ThirdTaskID string `json:"third_task_id"` + Type string `json:"type"` + UpdatedTime string `json:"updated_time"` + UserID string `json:"user_id"` +} + +type Params struct { + FolderType string `json:"folder_type"` + PredictSpeed string `json:"predict_speed"` + PredictType string `json:"predict_type"` +} + +// LoginReviewResp 登录验证响应 +type LoginReviewResp struct { + Creditkey string `json:"creditkey"` + Error string `json:"error"` + ErrorCode string `json:"errorCode"` + ErrorDesc string `json:"errorDesc"` + ErrorDescURL string `json:"errorDescUrl"` + ErrorIsRetry int `json:"errorIsRetry"` + ErrorDescription string `json:"error_description"` + IsCompressed string `json:"isCompressed"` + PlatformVersion string `json:"platformVersion"` + ProtocolVersion string `json:"protocolVersion"` + Reviewurl string `json:"reviewurl"` + SequenceNo string `json:"sequenceNo"` + UserID string `json:"userID"` + VerifyType string `json:"verifyType"` +} + +// ReviewData 验证数据 +type ReviewData struct { + Creditkey string `json:"creditkey"` + Reviewurl string `json:"reviewurl"` + Deviceid string `json:"deviceid"` + Devicesign string `json:"devicesign"` +} diff --git a/drivers/thunder/util.go b/drivers/thunder/util.go index 3ec8db58..b7afe56d 100644 --- 
a/drivers/thunder/util.go +++ b/drivers/thunder/util.go @@ -1,8 +1,10 @@ package thunder import ( + "crypto/md5" "crypto/sha1" "encoding/hex" + "encoding/json" "fmt" "io" "net/http" @@ -15,9 +17,11 @@ import ( ) const ( - API_URL = "https://api-pan.xunlei.com/drive/v1" - FILE_API_URL = API_URL + "/files" - XLUSER_API_URL = "https://xluser-ssl.xunlei.com/v1" + API_URL = "https://api-pan.xunlei.com/drive/v1" + FILE_API_URL = API_URL + "/files" + TASK_API_URL = API_URL + "/tasks" + XLUSER_API_BASE_URL = "https://xluser-ssl.xunlei.com" + XLUSER_API_URL = XLUSER_API_BASE_URL + "/v1" ) const ( @@ -33,6 +37,12 @@ const ( UPLOAD_TYPE_URL = "UPLOAD_TYPE_URL" ) +const ( + SignProvider = "access_end_point_token" + APPID = "40" + APPKey = "34a062aaa22f906fca4fefe9fb3a3021" +) + func GetAction(method string, url string) string { urlpath := regexp.MustCompile(`://[^/]+((/[^/\s?#]+)*)`).FindStringSubmatch(url)[1] return method + ":" + urlpath @@ -43,6 +53,8 @@ type Common struct { captchaToken string + creditKey string + // 签名相关,二选一 Algorithms []string Timestamp, CaptchaSign string @@ -68,6 +80,13 @@ func (c *Common) GetCaptchaToken() string { return c.captchaToken } +func (c *Common) SetCreditKey(creditKey string) { + c.creditKey = creditKey +} +func (c *Common) GetCreditKey() string { + return c.creditKey +} + // 刷新验证码token(登录后) func (c *Common) RefreshCaptchaTokenAtLogin(action, userID string) error { metas := map[string]string{ @@ -169,12 +188,53 @@ func (c *Common) Request(url, method string, callback base.ReqCallback, resp int var erron ErrResp utils.Json.Unmarshal(res.Body(), &erron) if erron.IsError() { + // review_panel 表示需要短信验证码进行验证 + if erron.ErrorMsg == "review_panel" { + return nil, c.getReviewData(res) + } + return nil, &erron } return res.Body(), nil } +// 获取验证所需内容 +func (c *Common) getReviewData(res *resty.Response) error { + var reviewResp LoginReviewResp + var reviewData ReviewData + + if err := utils.Json.Unmarshal(res.Body(), &reviewResp); err != nil { + return err + } + + deviceSign := generateDeviceSign(c.DeviceID, c.PackageName) + + reviewData = ReviewData{ + Creditkey: reviewResp.Creditkey, + Reviewurl: reviewResp.Reviewurl + "&deviceid=" + deviceSign, + Deviceid: deviceSign, + Devicesign: deviceSign, + } + + // 将reviewData转为JSON字符串 + reviewDataJSON, _ := json.MarshalIndent(reviewData, "", " ") + //reviewDataJSON, _ := json.Marshal(reviewData) + + return fmt.Errorf(` +
+🔒 本次登录需要验证
+This login requires verification
+
+下面是验证所需要的数据，具体使用方法请参照对应的驱动文档
+Below are the relevant verification data. For specific usage methods, please refer to the corresponding driver documentation.
+
+%s
+
`, string(reviewDataJSON)) +} + // 计算文件Gcid func getGcid(r io.Reader, size int64) (string, error) { calcBlockSize := func(j int64) int64 { @@ -200,3 +260,24 @@ func getGcid(r io.Reader, size int64) (string, error) { } return hex.EncodeToString(hash1.Sum(nil)), nil } + +func generateDeviceSign(deviceID, packageName string) string { + + signatureBase := fmt.Sprintf("%s%s%s%s", deviceID, packageName, APPID, APPKey) + + sha1Hash := sha1.New() + sha1Hash.Write([]byte(signatureBase)) + sha1Result := sha1Hash.Sum(nil) + + sha1String := hex.EncodeToString(sha1Result) + + md5Hash := md5.New() + md5Hash.Write([]byte(sha1String)) + md5Result := md5Hash.Sum(nil) + + md5String := hex.EncodeToString(md5Result) + + deviceSign := fmt.Sprintf("div101.%s%s", deviceID, md5String) + + return deviceSign +} diff --git a/drivers/thunder_browser/driver.go b/drivers/thunder_browser/driver.go index 96dd7e8e..0b38d077 100644 --- a/drivers/thunder_browser/driver.go +++ b/drivers/thunder_browser/driver.go @@ -4,10 +4,15 @@ import ( "context" "errors" "fmt" + "io" + "net/http" + "strings" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + streamPkg "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash" "github.com/aws/aws-sdk-go/aws" @@ -15,9 +20,6 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/go-resty/resty/v2" - "io" - "net/http" - "strings" ) type ThunderBrowser struct { @@ -456,15 +458,10 @@ func (xc *XunLeiBrowserCommon) Remove(ctx context.Context, obj model.Obj) error } func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - hi := stream.GetHash() - gcid := hi.GetHash(hash_extend.GCID) + gcid := stream.GetHash().GetHash(hash_extend.GCID) + var err error if len(gcid) < hash_extend.GCID.Width { - tFile, err := stream.CacheFullInTempFile() - if err != nil { - return err - } - - gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize()) + _, gcid, err = streamPkg.CacheFullInTempFileAndHash(stream, hash_extend.GCID, stream.GetSize()) if err != nil { return err } @@ -481,7 +478,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream } var resp UploadTaskResponse - _, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { + _, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { r.SetContext(ctx) r.SetBody(&js) }, &resp) @@ -508,7 +505,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream Bucket: aws.String(param.Bucket), Key: aws.String(param.Key), Expires: aws.Time(param.Expiration), - Body: io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up)), + Body: driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up))), }) return err } diff --git a/drivers/thunderx/driver.go b/drivers/thunderx/driver.go index b9ee668c..6ee8901a 100644 --- a/drivers/thunderx/driver.go +++ b/drivers/thunderx/driver.go @@ -3,11 +3,15 @@ package thunderx import ( "context" "fmt" + "net/http" + "strings" + "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" 
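Aside on the Thunder util changes above: generateDeviceSign in drivers/thunder/util.go is a fixed two-stage digest, SHA-1 over deviceID+packageName+APPID+APPKey, hex-encoded, then MD5 over that hex string, with the result prefixed by "div101." and the device ID. A self-contained sketch of the same chain (the sample device ID is the old default from meta.go, used here purely as illustrative input):

```go
package main

import (
	"crypto/md5"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

// deviceSign reproduces the generateDeviceSign chain from this patch:
// sha1(deviceID + packageName + appID + appKey) -> hex -> md5 -> hex,
// prefixed with "div101." and the device ID.
func deviceSign(deviceID, packageName, appID, appKey string) string {
	sha1Sum := sha1.Sum([]byte(deviceID + packageName + appID + appKey))
	md5Sum := md5.Sum([]byte(hex.EncodeToString(sha1Sum[:])))
	return "div101." + deviceID + hex.EncodeToString(md5Sum[:])
}

func main() {
	// APPID/APPKey values are the consts added in this patch; the device ID
	// is the old meta.go default, shown only as a worked example.
	fmt.Println(deviceSign("9aa5c268e7bcfc197a9ad88e2fb330e5",
		"com.xunlei.downloadprovider", "40", "34a062aaa22f906fca4fefe9fb3a3021"))
}
```

Init now also normalizes DeviceID: any user-supplied value that is not already 32 characters is replaced by its MD5 hex digest, so the sign input stays fixed-width.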
"github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash" "github.com/aws/aws-sdk-go/aws" @@ -15,8 +19,6 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/go-resty/resty/v2" - "net/http" - "strings" ) type ThunderX struct { @@ -363,29 +365,24 @@ func (xc *XunLeiXCommon) Remove(ctx context.Context, obj model.Obj) error { return err } -func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - hi := stream.GetHash() - gcid := hi.GetHash(hash_extend.GCID) +func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + gcid := file.GetHash().GetHash(hash_extend.GCID) + var err error if len(gcid) < hash_extend.GCID.Width { - tFile, err := stream.CacheFullInTempFile() - if err != nil { - return err - } - - gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize()) + _, gcid, err = stream.CacheFullInTempFileAndHash(file, hash_extend.GCID, file.GetSize()) if err != nil { return err } } var resp UploadTaskResponse - _, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { + _, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) { r.SetContext(ctx) r.SetBody(&base.Json{ "kind": FILE, "parent_id": dstDir.GetID(), - "name": stream.GetName(), - "size": stream.GetSize(), + "name": file.GetName(), + "size": file.GetSize(), "hash": gcid, "upload_type": UPLOAD_TYPE_RESUMABLE, }) @@ -406,14 +403,17 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model return err } uploader := s3manager.NewUploader(s) - if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { - uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1) + if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { + uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1) } _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{ Bucket: aws.String(param.Bucket), Key: aws.String(param.Key), Expires: aws.Time(param.Expiration), - Body: stream, + Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: file, + UpdateProgress: up, + }), }) return err } diff --git a/drivers/trainbit/driver.go b/drivers/trainbit/driver.go index 795b2fb8..f4f4bf3f 100644 --- a/drivers/trainbit/driver.go +++ b/drivers/trainbit/driver.go @@ -58,7 +58,7 @@ func (d *Trainbit) List(ctx context.Context, dir model.Obj, args model.ListArgs) return nil, err } var jsonData any - json.Unmarshal(data, &jsonData) + err = json.Unmarshal(data, &jsonData) if err != nil { return nil, err } @@ -114,23 +114,18 @@ func (d *Trainbit) Remove(ctx context.Context, obj model.Obj) error { return err } -func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { endpoint, _ := url.Parse("https://tb28.trainbit.com/api/upload/send_raw/") query := &url.Values{} query.Add("q", strings.Split(dstDir.GetID(), "_")[1]) query.Add("guid", guid) - query.Add("name", url.QueryEscape(local2provider(stream.GetName(), false)+".")) + query.Add("name", url.QueryEscape(local2provider(s.GetName(), false)+".")) 
endpoint.RawQuery = query.Encode() - var total int64 - total = 0 - progressReader := &ProgressReader{ - stream, - func(byteNum int) { - total += int64(byteNum) - up(float64(total) / float64(stream.GetSize()) * 100) - }, - } - req, err := http.NewRequest(http.MethodPost, endpoint.String(), progressReader) + progressReader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint.String(), progressReader) if err != nil { return err } diff --git a/drivers/trainbit/util.go b/drivers/trainbit/util.go index afc111a8..486e8851 100644 --- a/drivers/trainbit/util.go +++ b/drivers/trainbit/util.go @@ -13,17 +13,6 @@ import ( "github.com/alist-org/alist/v3/internal/model" ) -type ProgressReader struct { - io.Reader - reporter func(byteNum int) -} - -func (progressReader *ProgressReader) Read(data []byte) (int, error) { - byteNum, err := progressReader.Reader.Read(data) - progressReader.reporter(byteNum) - return byteNum, err -} - func get(url string, apiKey string, AUSHELLPORTAL string) (*http.Response, error) { req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { diff --git a/drivers/url_tree/driver.go b/drivers/url_tree/driver.go index 6a45bb7d..049bd2db 100644 --- a/drivers/url_tree/driver.go +++ b/drivers/url_tree/driver.go @@ -2,11 +2,15 @@ package url_tree import ( "context" + "errors" stdpath "path" + "strings" + "sync" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/pkg/utils" log "github.com/sirupsen/logrus" ) @@ -14,7 +18,8 @@ import ( type Urls struct { model.Storage Addition - root *Node + root *Node + mutex sync.RWMutex } func (d *Urls) Config() driver.Config { @@ -40,11 +45,15 @@ func (d *Urls) Drop(ctx context.Context) error { } func (d *Urls) Get(ctx context.Context, path string) (model.Obj, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() node := GetNodeFromRootByPath(d.root, path) return nodeToObj(node, path) } func (d *Urls) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() node := GetNodeFromRootByPath(d.root, dir.GetPath()) log.Debugf("path: %s, node: %+v", dir.GetPath(), node) if node == nil { @@ -59,6 +68,8 @@ func (d *Urls) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([] } func (d *Urls) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() node := GetNodeFromRootByPath(d.root, file.GetPath()) log.Debugf("path: %s, node: %+v", file.GetPath(), node) if node == nil { @@ -72,6 +83,192 @@ func (d *Urls) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (* return nil, errs.NotFile } +func (d *Urls) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { + if !d.Writable { + return nil, errs.PermissionDenied + } + d.mutex.Lock() + defer d.mutex.Unlock() + node := GetNodeFromRootByPath(d.root, parentDir.GetPath()) + if node == nil { + return nil, errs.ObjectNotFound + } + if node.isFile() { + return nil, errs.NotFolder + } + dir := &Node{ + Name: dirName, + Level: node.Level + 1, + } + node.Children = append(node.Children, dir) + d.updateStorage() + return nodeToObj(dir, stdpath.Join(parentDir.GetPath(), dirName)) +} + +func (d *Urls) Move(ctx context.Context, 
srcObj, dstDir model.Obj) (model.Obj, error) { + if !d.Writable { + return nil, errs.PermissionDenied + } + if strings.HasPrefix(dstDir.GetPath(), srcObj.GetPath()) { + return nil, errors.New("cannot move parent dir to child") + } + d.mutex.Lock() + defer d.mutex.Unlock() + dstNode := GetNodeFromRootByPath(d.root, dstDir.GetPath()) + if dstNode == nil || dstNode.isFile() { + return nil, errs.NotFolder + } + srcDir, srcName := stdpath.Split(srcObj.GetPath()) + srcParentNode := GetNodeFromRootByPath(d.root, srcDir) + if srcParentNode == nil { + return nil, errs.ObjectNotFound + } + newChildren := make([]*Node, 0, len(srcParentNode.Children)) + var srcNode *Node + for _, child := range srcParentNode.Children { + if child.Name == srcName { + srcNode = child + } else { + newChildren = append(newChildren, child) + } + } + if srcNode == nil { + return nil, errs.ObjectNotFound + } + srcParentNode.Children = newChildren + srcNode.setLevel(dstNode.Level + 1) + dstNode.Children = append(dstNode.Children, srcNode) + d.root.calSize() + d.updateStorage() + return nodeToObj(srcNode, stdpath.Join(dstDir.GetPath(), srcName)) +} + +func (d *Urls) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) { + if !d.Writable { + return nil, errs.PermissionDenied + } + d.mutex.Lock() + defer d.mutex.Unlock() + srcNode := GetNodeFromRootByPath(d.root, srcObj.GetPath()) + if srcNode == nil { + return nil, errs.ObjectNotFound + } + srcNode.Name = newName + d.updateStorage() + return nodeToObj(srcNode, stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName)) +} + +func (d *Urls) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { + if !d.Writable { + return nil, errs.PermissionDenied + } + if strings.HasPrefix(dstDir.GetPath(), srcObj.GetPath()) { + return nil, errors.New("cannot copy parent dir to child") + } + d.mutex.Lock() + defer d.mutex.Unlock() + dstNode := GetNodeFromRootByPath(d.root, dstDir.GetPath()) + if dstNode == nil || dstNode.isFile() { + return nil, errs.NotFolder + } + srcNode := GetNodeFromRootByPath(d.root, srcObj.GetPath()) + if srcNode == nil { + return nil, errs.ObjectNotFound + } + newNode := srcNode.deepCopy(dstNode.Level + 1) + dstNode.Children = append(dstNode.Children, newNode) + d.root.calSize() + d.updateStorage() + return nodeToObj(newNode, stdpath.Join(dstDir.GetPath(), stdpath.Base(srcObj.GetPath()))) +} + +func (d *Urls) Remove(ctx context.Context, obj model.Obj) error { + if !d.Writable { + return errs.PermissionDenied + } + d.mutex.Lock() + defer d.mutex.Unlock() + objDir, objName := stdpath.Split(obj.GetPath()) + nodeParent := GetNodeFromRootByPath(d.root, objDir) + if nodeParent == nil { + return errs.ObjectNotFound + } + newChildren := make([]*Node, 0, len(nodeParent.Children)) + var deletedObj *Node + for _, child := range nodeParent.Children { + if child.Name != objName { + newChildren = append(newChildren, child) + } else { + deletedObj = child + } + } + if deletedObj == nil { + return errs.ObjectNotFound + } + nodeParent.Children = newChildren + if deletedObj.Size > 0 { + d.root.calSize() + } + d.updateStorage() + return nil +} + +func (d *Urls) PutURL(ctx context.Context, dstDir model.Obj, name, url string) (model.Obj, error) { + if !d.Writable { + return nil, errs.PermissionDenied + } + d.mutex.Lock() + defer d.mutex.Unlock() + dirNode := GetNodeFromRootByPath(d.root, dstDir.GetPath()) + if dirNode == nil || dirNode.isFile() { + return nil, errs.NotFolder + } + newNode := &Node{ + Name: name, + Level: dirNode.Level + 1, + Url: 
url, + } + dirNode.Children = append(dirNode.Children, newNode) + if d.HeadSize { + size, err := getSizeFromUrl(url) + if err != nil { + log.Errorf("get size from url error: %s", err) + } else { + newNode.Size = size + d.root.calSize() + } + } + d.updateStorage() + return nodeToObj(newNode, stdpath.Join(dstDir.GetPath(), name)) +} + +func (d *Urls) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { + if !d.Writable { + return errs.PermissionDenied + } + d.mutex.Lock() + defer d.mutex.Unlock() + node := GetNodeFromRootByPath(d.root, dstDir.GetPath()) // parent + if node == nil { + return errs.ObjectNotFound + } + if node.isFile() { + return errs.NotFolder + } + file, err := parseFileLine(stream.GetName(), d.HeadSize) + if err != nil { + return err + } + node.Children = append(node.Children, file) + d.updateStorage() + return nil +} + +func (d *Urls) updateStorage() { + d.UrlStructure = StringifyTree(d.root) + op.MustSaveDriverStorage(d) +} + //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { // return nil, errs.NotSupport //} diff --git a/drivers/url_tree/meta.go b/drivers/url_tree/meta.go index b3ae33dc..c40414f5 100644 --- a/drivers/url_tree/meta.go +++ b/drivers/url_tree/meta.go @@ -12,6 +12,7 @@ type Addition struct { // define other UrlStructure string `json:"url_structure" type:"text" required:"true" default:"https://jsd.nn.ci/gh/alist-org/alist/README.md\nhttps://jsd.nn.ci/gh/alist-org/alist/README_cn.md\nfolder:\n CONTRIBUTING.md:1635:https://jsd.nn.ci/gh/alist-org/alist/CONTRIBUTING.md\n CODE_OF_CONDUCT.md:2093:https://jsd.nn.ci/gh/alist-org/alist/CODE_OF_CONDUCT.md" help:"structure:FolderName:\n [FileName:][FileSize:][Modified:]Url"` HeadSize bool `json:"head_size" type:"bool" default:"false" help:"Use head method to get file size, but it may be failed."` + Writable bool `json:"writable" type:"bool" default:"false"` } var config = driver.Config{ @@ -20,7 +21,7 @@ var config = driver.Config{ OnlyLocal: false, OnlyProxy: false, NoCache: true, - NoUpload: true, + NoUpload: false, NeedMs: false, DefaultRoot: "", CheckStatus: true, diff --git a/drivers/url_tree/types.go b/drivers/url_tree/types.go index 7e8ca3d9..cf62d29d 100644 --- a/drivers/url_tree/types.go +++ b/drivers/url_tree/types.go @@ -1,5 +1,7 @@ package url_tree +import "github.com/alist-org/alist/v3/pkg/utils" + // Node is a node in the folder tree type Node struct { Url string @@ -44,3 +46,19 @@ func (node *Node) calSize() int64 { node.Size = size return size } + +func (node *Node) setLevel(level int) { + node.Level = level + for _, child := range node.Children { + child.setLevel(level + 1) + } +} + +func (node *Node) deepCopy(level int) *Node { + ret := *node + ret.Level = level + ret.Children, _ = utils.SliceConvert(ret.Children, func(child *Node) (*Node, error) { + return child.deepCopy(level + 1), nil + }) + return &ret +} diff --git a/drivers/url_tree/util.go b/drivers/url_tree/util.go index 4065218f..61a3fde2 100644 --- a/drivers/url_tree/util.go +++ b/drivers/url_tree/util.go @@ -153,6 +153,9 @@ func splitPath(path string) []string { if path == "/" { return []string{"root"} } + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } parts := strings.Split(path, "/") parts[0] = "root" return parts @@ -190,3 +193,46 @@ func getSizeFromUrl(url string) (int64, error) { } return size, nil } + +func StringifyTree(node *Node) string { + sb := strings.Builder{} + if node.Level == -1 { + for i, child := range 
node.Children { + sb.WriteString(StringifyTree(child)) + if i < len(node.Children)-1 { + sb.WriteString("\n") + } + } + return sb.String() + } + for i := 0; i < node.Level; i++ { + sb.WriteString(" ") + } + if node.Url == "" { + sb.WriteString(node.Name) + sb.WriteString(":") + for _, child := range node.Children { + sb.WriteString("\n") + sb.WriteString(StringifyTree(child)) + } + } else if node.Size == 0 && node.Modified == 0 { + if stdpath.Base(node.Url) == node.Name { + sb.WriteString(node.Url) + } else { + sb.WriteString(fmt.Sprintf("%s:%s", node.Name, node.Url)) + } + } else { + sb.WriteString(node.Name) + sb.WriteString(":") + if node.Size != 0 || node.Modified != 0 { + sb.WriteString(strconv.FormatInt(node.Size, 10)) + sb.WriteString(":") + } + if node.Modified != 0 { + sb.WriteString(strconv.FormatInt(node.Modified, 10)) + sb.WriteString(":") + } + sb.WriteString(node.Url) + } + return sb.String() +} diff --git a/drivers/uss/driver.go b/drivers/uss/driver.go index 447515d8..2e219050 100644 --- a/drivers/uss/driver.go +++ b/drivers/uss/driver.go @@ -3,6 +3,7 @@ package uss import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/stream" "net/url" "path" "strings" @@ -122,11 +123,13 @@ func (d *USS) Remove(ctx context.Context, obj model.Obj) error { }) } -func (d *USS) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { - // TODO not support cancel?? +func (d *USS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { return d.client.Put(&upyun.PutObjectConfig{ - Path: getKey(path.Join(dstDir.GetPath(), stream.GetName()), false), - Reader: stream, + Path: getKey(path.Join(dstDir.GetPath(), s.GetName()), false), + Reader: driver.NewLimitedUploadStream(ctx, &stream.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }), }) } diff --git a/drivers/vtencent/drive.go b/drivers/vtencent/drive.go index 67643143..36a91672 100644 --- a/drivers/vtencent/drive.go +++ b/drivers/vtencent/drive.go @@ -55,7 +55,9 @@ func (d *Vtencent) Init(ctx context.Context) error { } func (d *Vtencent) Drop(ctx context.Context) error { - d.cron.Stop() + if d.cron != nil { + d.cron.Stop() + } return nil } diff --git a/drivers/vtencent/util.go b/drivers/vtencent/util.go index ba87f1ab..4ba72d1b 100644 --- a/drivers/vtencent/util.go +++ b/drivers/vtencent/util.go @@ -8,9 +8,7 @@ import ( "fmt" "io" "net/http" - "path" "strconv" - "strings" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/driver" @@ -151,7 +149,7 @@ func (d *Vtencent) ApplyUploadUGC(signature string, stream model.FileStreamer) ( form := base.Json{ "signature": signature, "videoName": stream.GetName(), - "videoType": strings.ReplaceAll(path.Ext(stream.GetName()), ".", ""), + "videoType": utils.Ext(stream.GetName()), "videoSize": stream.GetSize(), } var resps RspApplyUploadUGC @@ -278,7 +276,8 @@ func (d *Vtencent) FileUpload(ctx context.Context, dstDir model.Obj, stream mode input := &s3manager.UploadInput{ Bucket: aws.String(fmt.Sprintf("%s-%d", params.StorageBucket, params.StorageAppID)), Key: ¶ms.Video.StoragePath, - Body: io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up))), + Body: driver.NewLimitedUploadStream(ctx, + io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up)))), } _, err = uploader.UploadWithContext(ctx, input) if err != nil { diff --git a/drivers/webdav/driver.go b/drivers/webdav/driver.go index b402b1db..45150fca 100644 
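Aside on the url_tree changes above: StringifyTree is the inverse of the existing parser, re-serializing the in-memory tree after every write so updateStorage can persist it back into the url_structure setting. A folder becomes a `Name:` line with its children indented beneath it, and a file line is `[FileName:][FileSize:][Modified:]Url`, collapsing to the bare URL when the URL's basename already equals the file name. A small sample of the text form, taken from the driver's default url_structure (spacing as in that default):

```go
package main

import "fmt"

func main() {
	// The text form StringifyTree emits and the driver parses back:
	// a folder opens with "Name:", children are indented beneath it,
	// and file lines are [FileName:][FileSize:][Modified:]Url.
	const urlStructure = `https://jsd.nn.ci/gh/alist-org/alist/README.md
https://jsd.nn.ci/gh/alist-org/alist/README_cn.md
folder:
 CONTRIBUTING.md:1635:https://jsd.nn.ci/gh/alist-org/alist/CONTRIBUTING.md
 CODE_OF_CONDUCT.md:2093:https://jsd.nn.ci/gh/alist-org/alist/CODE_OF_CONDUCT.md`
	fmt.Println(urlStructure)
}
```

With Writable enabled, Move/Copy/Rename/Remove/PutURL all run under the new write lock and end in updateStorage, so the setting text and the in-memory tree can no longer drift apart.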
--- a/drivers/webdav/driver.go +++ b/drivers/webdav/driver.go @@ -93,13 +93,16 @@ func (d *WebDav) Remove(ctx context.Context, obj model.Obj) error { return d.client.RemoveAll(getPath(obj)) } -func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { callback := func(r *http.Request) { - r.Header.Set("Content-Type", stream.GetMimetype()) - r.ContentLength = stream.GetSize() + r.Header.Set("Content-Type", s.GetMimetype()) + r.ContentLength = s.GetSize() } - // TODO: support cancel - err := d.client.WriteStream(path.Join(dstDir.GetPath(), stream.GetName()), stream, 0644, callback) + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) + err := d.client.WriteStream(path.Join(dstDir.GetPath(), s.GetName()), reader, 0644, callback) return err } diff --git a/drivers/weiyun/driver.go b/drivers/weiyun/driver.go index e6d5897c..90793d33 100644 --- a/drivers/weiyun/driver.go +++ b/drivers/weiyun/driver.go @@ -7,6 +7,7 @@ import ( "math" "net/http" "strconv" + "sync/atomic" "time" "github.com/alist-org/alist/v3/drivers/base" @@ -69,7 +70,7 @@ func (d *WeiYun) Init(ctx context.Context) error { if d.client.LoginType() == 1 { d.cron = cron.NewCron(time.Minute * 5) d.cron.Do(func() { - d.client.KeepAlive() + _ = d.client.KeepAlive() }) } @@ -311,77 +312,83 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr // NOTE: // 秒传需要sha1最后一个状态,但sha1无法逆运算需要读完整个文件(或许可以??) // 服务器支持上传进度恢复,不需要额外实现 - if folder, ok := dstDir.(*Folder); ok { - file, err := stream.CacheFullInTempFile() - if err != nil { - return nil, err - } + var folder *Folder + var ok bool + if folder, ok = dstDir.(*Folder); !ok { + return nil, errs.NotSupport + } + file, err := stream.CacheFullInTempFile() + if err != nil { + return nil, err + } - // step 1. - preData, err := d.client.PreUpload(ctx, weiyunsdkgo.UpdloadFileParam{ - PdirKey: folder.GetPKey(), - DirKey: folder.DirKey, + // step 1. + preData, err := d.client.PreUpload(ctx, weiyunsdkgo.UpdloadFileParam{ + PdirKey: folder.GetPKey(), + DirKey: folder.DirKey, - FileName: stream.GetName(), - FileSize: stream.GetSize(), - File: file, + FileName: stream.GetName(), + FileSize: stream.GetSize(), + File: file, - ChannelCount: 4, - FileExistOption: 1, - }) - if err != nil { - return nil, err - } + ChannelCount: 4, + FileExistOption: 1, + }) + if err != nil { + return nil, err + } - // not fast upload - if !preData.FileExist { - // step.2 增加上传通道 - if len(preData.ChannelList) < d.uploadThread { - newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData) - if err != nil { - return nil, err - } - preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...) 
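The webdav Put just above, and the trainbit and yandex_disk rewrites nearby, converge on one shape: wrap the stream in a progress-updating reader, wrap that in the rate-limited upload stream, and hand the result to a context-aware request. The internals of driver.ReaderUpdatingProgress and driver.NewLimitedUploadStream are not part of this diff, so the following is only an approximation of the pattern using the standard library:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
)

// progressReader reports a running percentage after every Read,
// approximating what a ReaderUpdatingProgress-style wrapper does.
type progressReader struct {
	r     io.Reader
	total int64
	read  int64
	up    func(pct float64)
}

func (p *progressReader) Read(b []byte) (int, error) {
	n, err := p.r.Read(b)
	p.read += int64(n)
	if p.total > 0 {
		p.up(float64(p.read) * 100 / float64(p.total))
	}
	return n, err
}

func upload(ctx context.Context, url string, data []byte) error {
	body := &progressReader{
		r:     bytes.NewReader(data),
		total: int64(len(data)),
		up:    func(pct float64) { fmt.Printf("\r%.0f%%", pct) },
	}
	// Context-aware request: cancelling ctx aborts the transfer, which is
	// what the switch to http.NewRequestWithContext buys these drivers.
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, body)
	if err != nil {
		return err
	}
	// A custom Reader defeats automatic length detection, so set the length
	// explicitly, as the yandex_disk change below does for its streaming body.
	req.ContentLength = int64(len(data))
	req.Header.Set("Content-Type", "application/octet-stream")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return res.Body.Close()
}

func main() {
	// Hypothetical endpoint, for illustration only.
	_ = upload(context.Background(), "http://127.0.0.1:8080/put", []byte("hello"))
}
```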
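The weiyun Put restructuring that continues below keeps the same channel-based upload but de-nests the happy path, adds an atomic byte counter for progress, and retries each channel with backoff inside the project's errgroup wrapper. That wrapper takes retry options directly, which golang.org/x/sync's errgroup does not, so this sketch inlines a simple retry loop as an assumption; the concurrency shape is otherwise the same:

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

	"golang.org/x/sync/errgroup"
)

func uploadChunk(ctx context.Context, idx int) error {
	// Placeholder for the real per-channel UploadFile call.
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(10 * time.Millisecond):
		return nil
	}
}

func uploadAll(ctx context.Context, chunks int, size, chunkSize int64, up func(pct float64)) error {
	g, gctx := errgroup.WithContext(ctx)
	g.SetLimit(4) // cf. ChannelCount: 4 in the weiyun PreUpload call

	var done atomic.Int64
	for i := 0; i < chunks; i++ {
		i := i
		g.Go(func() error {
			var err error
			for attempt := 0; attempt < 3; attempt++ { // retry.Attempts(3) analogue
				if err = uploadChunk(gctx, i); err == nil {
					break
				}
				time.Sleep(time.Second << attempt) // backoff analogue
			}
			if err != nil {
				return err
			}
			// Last chunk assumed full-size here for simplicity.
			cur := done.Add(chunkSize)
			up(float64(cur) * 100 / float64(size))
			return nil
		})
	}
	return g.Wait()
}

func main() {
	_ = uploadAll(context.Background(), 8, 8<<20, 1<<20, func(p float64) {
		fmt.Printf("progress: %.0f%%\n", p)
	})
}
```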
- } - // step.3 上传 - threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList), - retry.Attempts(3), - retry.Delay(time.Second), - retry.DelayType(retry.BackOffDelay)) - - for _, channel := range preData.ChannelList { - if utils.IsCanceled(upCtx) { - break - } - - var channel = channel - threadG.Go(func(ctx context.Context) error { - for { - channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len))) - upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData, - io.NewSectionReader(file, channel.Offset, int64(channel.Len))) - if err != nil { - return err - } - // 上传完成 - if upData.UploadState != 1 { - return nil - } - channel = upData.Channel - } - }) - } - if err = threadG.Wait(); err != nil { + // not fast upload + if !preData.FileExist { + // step.2 增加上传通道 + if len(preData.ChannelList) < d.uploadThread { + newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData) + if err != nil { return nil, err } + preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...) } + // step.3 上传 + threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList), + retry.Attempts(3), + retry.Delay(time.Second), + retry.DelayType(retry.BackOffDelay)) - return &File{ - PFolder: folder, - File: preData.File, - }, nil + total := atomic.Int64{} + for _, channel := range preData.ChannelList { + if utils.IsCanceled(upCtx) { + break + } + + var channel = channel + threadG.Go(func(ctx context.Context) error { + for { + channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len))) + len64 := int64(channel.Len) + upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData, + driver.NewLimitedUploadStream(ctx, io.NewSectionReader(file, channel.Offset, len64))) + if err != nil { + return err + } + cur := total.Add(len64) + up(float64(cur) * 100.0 / float64(stream.GetSize())) + // 上传完成 + if upData.UploadState != 1 { + return nil + } + channel = upData.Channel + } + }) + } + if err = threadG.Wait(); err != nil { + return nil, err + } } - return nil, errs.NotSupport + + return &File{ + PFolder: folder, + File: preData.File, + }, nil } // func (d *WeiYun) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { diff --git a/drivers/wopan/driver.go b/drivers/wopan/driver.go index bccce4b1..82ec05a9 100644 --- a/drivers/wopan/driver.go +++ b/drivers/wopan/driver.go @@ -155,12 +155,13 @@ func (d *Wopan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre _, err := d.client.Upload2C(d.getSpaceType(), wopan.Upload2CFile{ Name: stream.GetName(), Size: stream.GetSize(), - Content: stream, + Content: driver.NewLimitedUploadStream(ctx, stream), ContentType: stream.GetMimetype(), }, dstDir.GetID(), d.FamilyID, wopan.Upload2COption{ OnProgress: func(current, total int64) { up(100 * float64(current) / float64(total)) }, + Ctx: ctx, }) return err } diff --git a/drivers/yandex_disk/driver.go b/drivers/yandex_disk/driver.go index 5af9f2e4..6e5ca05c 100644 --- a/drivers/yandex_disk/driver.go +++ b/drivers/yandex_disk/driver.go @@ -106,25 +106,31 @@ func (d *YandexDisk) Remove(ctx context.Context, obj model.Obj) error { return err } -func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { +func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { var resp UploadResp _, err := d.request("/upload", 
http.MethodGet, func(req *resty.Request) { req.SetQueryParams(map[string]string{ - "path": path.Join(dstDir.GetPath(), stream.GetName()), + "path": path.Join(dstDir.GetPath(), s.GetName()), "overwrite": "true", }) }, &resp) if err != nil { return err } - req, err := http.NewRequest(resp.Method, resp.Href, stream) + reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ + Reader: s, + UpdateProgress: up, + }) + req, err := http.NewRequestWithContext(ctx, resp.Method, resp.Href, reader) if err != nil { return err } - req = req.WithContext(ctx) - req.Header.Set("Content-Length", strconv.FormatInt(stream.GetSize(), 10)) + req.Header.Set("Content-Length", strconv.FormatInt(s.GetSize(), 10)) req.Header.Set("Content-Type", "application/octet-stream") res, err := base.HttpClient.Do(req) + if err != nil { + return err + } _ = res.Body.Close() return err } diff --git a/entrypoint.sh b/entrypoint.sh index a0d80835..c24ed6ee 100644 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -1,11 +1,19 @@ -#!/bin/bash - -chown -R ${PUID}:${PGID} /opt/alist/ +#!/bin/sh umask ${UMASK} if [ "$1" = "version" ]; then ./alist version else + if [ "$RUN_ARIA2" = "true" ]; then + chown -R ${PUID}:${PGID} /opt/aria2/ + exec su-exec ${PUID}:${PGID} nohup aria2c \ + --enable-rpc \ + --rpc-allow-origin-all \ + --conf-path=/opt/aria2/.aria2/aria2.conf \ + >/dev/null 2>&1 & + fi + + chown -R ${PUID}:${PGID} /opt/alist/ exec su-exec ${PUID}:${PGID} ./alist server --no-prefix fi \ No newline at end of file diff --git a/go.mod b/go.mod index 8ec1c302..e8afe0e7 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,12 @@ module github.com/alist-org/alist/v3 -go 1.22.4 +go 1.23.4 require ( - github.com/SheltonZhu/115driver v1.0.27 + github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 + github.com/KirCute/sftpd-alist v0.0.12 + github.com/ProtonMail/go-crypto v1.0.0 + github.com/SheltonZhu/115driver v1.0.34 github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4 github.com/alist-org/gofakes3 v0.0.7 @@ -25,7 +28,6 @@ require ( github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 github.com/foxxorcat/mopan-sdk-go v0.1.6 github.com/foxxorcat/weiyun-sdk-go v0.1.3 - github.com/gaoyb7/115drive-webdav v0.1.8 github.com/gin-contrib/cors v1.7.2 github.com/gin-gonic/gin v1.10.0 github.com/go-resty/resty/v2 v2.14.0 @@ -33,37 +35,42 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 + github.com/hekmon/transmissionrpc/v3 v3.0.0 github.com/hirochachacha/go-smb2 v1.1.0 github.com/ipfs/go-ipfs-api v0.7.0 github.com/jlaffaye/ftp v0.2.0 github.com/json-iterator/go v1.1.12 + github.com/kdomanski/iso9660 v0.4.0 github.com/larksuite/oapi-sdk-go/v3 v3.3.1 github.com/maruel/natural v1.1.1 github.com/meilisearch/meilisearch-go v0.27.2 + github.com/mholt/archives v0.1.0 github.com/minio/sio v0.4.0 github.com/natefinch/lumberjack v2.0.0+incompatible github.com/ncw/swift/v2 v2.0.3 - github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831 github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/pquerna/otp v1.4.0 github.com/rclone/rclone v1.67.0 + github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d github.com/sirupsen/logrus v1.9.3 + github.com/spf13/afero v1.11.0 github.com/spf13/cobra v1.8.1 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 github.com/u2takey/ffmpeg-go v0.5.0 
 	github.com/upyun/go-sdk/v3 v3.0.4
 	github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5
-	github.com/xhofe/tache v0.1.2
+	github.com/xhofe/tache v0.1.5
 	github.com/xhofe/wopan-sdk-go v0.1.3
+	github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
 	github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22
-	golang.org/x/crypto v0.27.0
+	golang.org/x/crypto v0.36.0
 	golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e
 	golang.org/x/image v0.19.0
-	golang.org/x/net v0.28.0
+	golang.org/x/net v0.38.0
 	golang.org/x/oauth2 v0.22.0
-	golang.org/x/time v0.6.0
+	golang.org/x/time v0.8.0
 	google.golang.org/appengine v1.6.8
 	gopkg.in/ldap.v3 v3.1.0
 	gorm.io/driver/mysql v1.5.7
@@ -73,17 +80,47 @@
 )
 
 require (
-	github.com/BurntSushi/toml v0.3.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 // indirect
+)
+
+require (
+	github.com/STARRY-S/zip v0.2.1 // indirect
+	github.com/aymerick/douceur v0.2.0 // indirect
 	github.com/blevesearch/go-faiss v1.0.20 // indirect
 	github.com/blevesearch/zapx/v16 v16.1.5 // indirect
+	github.com/bodgit/plumbing v1.3.0 // indirect
+	github.com/bodgit/sevenzip v1.6.0
+	github.com/bodgit/windows v1.0.1 // indirect
 	github.com/bytedance/sonic/loader v0.1.1 // indirect
 	github.com/charmbracelet/x/ansi v0.2.3 // indirect
 	github.com/charmbracelet/x/term v0.2.0 // indirect
+	github.com/cloudflare/circl v1.3.7 // indirect
 	github.com/cloudwego/base64x v0.1.4 // indirect
 	github.com/cloudwego/iasm v0.2.0 // indirect
+	github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
 	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
+	github.com/fclairamb/go-log v0.5.0 // indirect
+	github.com/gorilla/css v1.0.1 // indirect
+	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+	github.com/hekmon/cunits/v2 v2.1.0 // indirect
 	github.com/ipfs/boxo v0.12.0 // indirect
 	github.com/jackc/puddle/v2 v2.2.1 // indirect
+	github.com/klauspost/pgzip v1.2.6 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
+	github.com/microcosm-cc/bluemonday v1.0.27
+	github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78
+	github.com/sorairolake/lzip-go v0.3.5 // indirect
+	github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
+	github.com/therootcompany/xz v1.0.1 // indirect
+	github.com/ulikunitz/xz v0.5.12 // indirect
+	github.com/xhofe/115-sdk-go v0.1.5
+	github.com/yuin/goldmark v1.7.8
+	go4.org v0.0.0-20230225012048-214862532bf5
+	resty.dev/v3 v3.0.0-beta.2 // indirect
 )
 
 require (
@@ -91,8 +128,8 @@
 	github.com/RoaringBitmap/roaring v1.9.3 // indirect
 	github.com/abbot/go-http-auth v0.4.0 // indirect
 	github.com/aead/ecdh v0.2.0 // indirect
-	github.com/andreburgaud/crypt2go v1.2.0 // indirect
-	github.com/andybalholm/brotli v1.0.4 // indirect
+	github.com/andreburgaud/crypt2go v1.8.0 // indirect
+	github.com/andybalholm/brotli v1.1.1 // indirect
 	github.com/axgle/mahonia v0.0.0-20180208002826-3358181d7394
 	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
 	github.com/benbjohnson/clock v1.3.0 // indirect
@@ -114,7 +151,6 @@
 	github.com/blevesearch/zapx/v13 v13.3.10 // indirect
 	github.com/blevesearch/zapx/v14 v14.3.10 // indirect
 	github.com/blevesearch/zapx/v15 v15.3.13 // indirect
-	github.com/bluele/gcache v0.0.2 // indirect
 	github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
 	github.com/bytedance/sonic v1.11.6 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -147,13 +183,12 @@
 	github.com/jackc/pgpassfile v1.0.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
 	github.com/jackc/pgx/v5 v5.5.5 // indirect
-	github.com/jaevor/go-nanoid v1.3.0 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
-	github.com/klauspost/compress v1.17.8 // indirect
+	github.com/klauspost/compress v1.17.11 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/leodido/go-urn v1.4.0 // indirect
@@ -186,8 +221,9 @@
 	github.com/multiformats/go-multihash v0.2.3 // indirect
 	github.com/multiformats/go-multistream v0.4.1 // indirect
 	github.com/multiformats/go-varint v0.0.7 // indirect
+	github.com/otiai10/copy v1.14.0
 	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
-	github.com/pierrec/lz4/v4 v4.1.18 // indirect
+	github.com/pierrec/lz4/v4 v4.1.21 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
 	github.com/pquerna/cachecontrol v0.1.0 // indirect
@@ -216,10 +252,10 @@
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
 	go.etcd.io/bbolt v1.3.8 // indirect
 	golang.org/x/arch v0.8.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.25.0 // indirect
-	golang.org/x/term v0.24.0 // indirect
-	golang.org/x/text v0.18.0 // indirect
+	golang.org/x/sync v0.12.0
+	golang.org/x/sys v0.31.0 // indirect
+	golang.org/x/term v0.30.0 // indirect
+	golang.org/x/text v0.23.0
 	golang.org/x/tools v0.24.0 // indirect
 	google.golang.org/api v0.169.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
@@ -231,3 +267,5 @@
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	lukechampine.com/blake3 v1.1.7 // indirect
 )
+
+// replace github.com/xhofe/115-sdk-go => ../../xhofe/115-sdk-go
diff --git a/go.sum b/go.sum
index 6ba075f3..6fbaeb2b 100644
--- a/go.sum
+++ b/go.sum
@@ -1,14 +1,47 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw=
 cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
 cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 h1:ikwCzeqoqN6wvBHOB9OI6dde/jbV7EoTMpUcxtYl5Po=
+github.com/KirCute/ftpserverlib-pasvportmap v1.25.0/go.mod h1:v0NgMtKDDi/6CM6r4P+daCljCW3eO9yS+Z+pZDTKo1E=
+github.com/KirCute/sftpd-alist v0.0.12 h1:GNVM5QLbQLAfXP4wGUlXFA2IO6fVek0n0IsGnOuISdg=
+github.com/KirCute/sftpd-alist v0.0.12/go.mod h1:2wNK7yyW2XfjyJq10OY6xB4COLac64hOwfV6clDJn6s=
 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
+github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
+github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
 github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM=
 github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
-github.com/SheltonZhu/115driver v1.0.27 h1:Ya1HYHYXFmi7JnqQ/+Vy6xZvq3leto+E+PxTm6UChj8=
-github.com/SheltonZhu/115driver v1.0.27/go.mod h1:e3fPOBANbH/FsTya8FquJwOR3ErhCQgEab3q6CVY2k4=
+github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
+github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4=
+github.com/SheltonZhu/115driver v1.0.34 h1:zhMLp4vgq7GksqvSxQQDOVfK6EOHldQl4b2n8tnZ+EE=
+github.com/SheltonZhu/115driver v1.0.34/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E=
 github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A=
 github.com/Unknwon/goconfig v1.0.0/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
 github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 h1:h6q5E9aMBhhdqouW81LozVPI1I+Pu6IxL2EKpfm5OjY=
@@ -25,10 +58,11 @@ github.com/alist-org/times v0.0.0-20240721124654-efa0c7d3ad92 h1:pIEI87zhv8ZzQcu
 github.com/alist-org/times v0.0.0-20240721124654-efa0c7d3ad92/go.mod h1:oPJwGY3sLmGgcJamGumz//0A35f4BwQRacyqLNcJTOU=
 github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g=
 github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
-github.com/andreburgaud/crypt2go v1.2.0 h1:oly/ENAodeqTYpUafgd4r3v+VKLQnmOKUyfpj+TxHbE=
-github.com/andreburgaud/crypt2go v1.2.0/go.mod h1:kKRqlrX/3Q9Ki7HdUsoh0cX1Urq14/Hcta4l4VrIXrI=
-github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
+github.com/andreburgaud/crypt2go v1.8.0 h1:J73vGTb1P6XL69SSuumbKs0DWn3ulbl9L92ZXBjw6pc=
+github.com/andreburgaud/crypt2go v1.8.0/go.mod h1:L5nfShQ91W78hOWhUH2tlGRPO+POAPJAF5fKOLB9SXg=
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
+github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
 github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
 github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
 github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
@@ -40,6 +74,8 @@ github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiE
 github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
 github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
 github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
+github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
+github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
 github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
 github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -84,20 +120,24 @@ github.com/blevesearch/zapx/v15 v15.3.13 h1:6EkfaZiPlAxqXz0neniq35my6S48QI94W/wy
 github.com/blevesearch/zapx/v15 v15.3.13/go.mod h1:Turk/TNRKj9es7ZpKK95PS7f6D44Y7fAFy8F4LXQtGg=
 github.com/blevesearch/zapx/v16 v16.1.5 h1:b0sMcarqNFxuXvjoXsF8WtwVahnxyhEvBSRJi/AUHjU=
 github.com/blevesearch/zapx/v16 v16.1.5/go.mod h1:J4mSF39w1QELc11EWRSBFkPeZuO7r/NPKkHzDCoiaI8=
-github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
-github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
+github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU=
+github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs=
+github.com/bodgit/sevenzip v1.6.0 h1:a4R0Wu6/P1o1pP/3VV++aEOcyeBxeO/xE2Y9NSTrr6A=
+github.com/bodgit/sevenzip v1.6.0/go.mod h1:zOBh9nJUof7tcrlqJFv1koWRrhz3LbDbUNngkuZxLMc=
+github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
+github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
 github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
 github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
 github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
 github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
 github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
 github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
 github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc=
 github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/charmbracelet/bubbles v0.19.0 h1:gKZkKXPP6GlDk6EcfujDK19PCQqRjaJZQ7QRERx1UF0=
-github.com/charmbracelet/bubbles v0.19.0/go.mod h1:WILteEqZ+krG5c3ntGEMeG99nCupcuIk7V0/zOP0tOA=
 github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE=
 github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU=
 github.com/charmbracelet/bubbletea v1.1.0 h1:FjAl9eAL3HBCHenhz/ZPjkKdScmaS5SK69JAK2YJK9c=
@@ -112,8 +152,15 @@ github.com/charmbracelet/x/term v0.2.0 h1:cNB9Ot9q8I711MyZ7myUR5HFWL/lc3OpU8jZ4h
 github.com/charmbracelet/x/term v0.2.0/go.mod h1:GVxgxAbjUrmpvIINHIQnJJKpMlHiZ4cktEQCN6GWyF0=
 github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764=
 github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e h1:GLC8iDDcbt1H8+RkNao2nRGjyNTIo81e1rAJT9/uWYA=
 github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.mod h1:ln9Whp+wVY/FTbn2SK0ag+SKD2fC0yQCF/Lqowc1LmU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
 github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
 github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
 github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
 github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
@@ -142,10 +189,17 @@ github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1
 github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
 github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo=
 github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4=
+github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
+github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
 github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJLNCEi7YHVMkwwtfSr2k9splgdSM=
 github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
 github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
+github.com/fclairamb/go-log v0.5.0 h1:Gz9wSamEaA6lta4IU2cjJc2xSq5sV5VYSB5w/SUHhVc=
+github.com/fclairamb/go-log v0.5.0/go.mod h1:XoRO1dYezpsGmLLkZE9I+sHqpqY65p8JA+Vqblb7k40=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/foxxorcat/mopan-sdk-go v0.1.6 h1:6J37oI4wMZLj8EPgSCcSTTIbnI5D6RCNW/srX8vQd1Y=
@@ -157,19 +211,22 @@ github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
 github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
-github.com/gaoyb7/115drive-webdav v0.1.8 h1:EJt4PSmcbvBY4KUh2zSo5p6fN9LZFNkIzuKejipubVw=
-github.com/gaoyb7/115drive-webdav v0.1.8/go.mod h1:BKbeY6j8SKs3+rzBFFALznGxbPmefEm3vA+dGhqgOGU=
 github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w=
 github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
 github.com/gin-contrib/cors v1.7.2 h1:oLDHxdg8W/XDoN/8zamqk/Drgt4oVZDvaV0YmvVICQw=
 github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E=
 github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
 github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
-github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
 github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
 github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
 github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s=
 github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
 github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -178,20 +235,14 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
 github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
 github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
 github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
 github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
 github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
-github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
 github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
 github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
-github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
 github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
 github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
-github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
 github.com/go-resty/resty/v2 v2.14.0 h1:/rhkzsAqGQkozwfKS5aFAbb6TyKd3zyFRWcdRXLPCAU=
 github.com/go-resty/resty/v2 v2.14.0/go.mod h1:IW6mekUOsElt9C7oWr0XRt9BNSD6D5rr9mhk6NjmNHg=
 github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
@@ -200,7 +251,6 @@ github.com/go-webauthn/webauthn v0.11.1 h1:5G/+dg91/VcaJHTtJUfwIlNJkLwbJCcnUc4W8
 github.com/go-webauthn/webauthn v0.11.1/go.mod h1:YXRm1WG0OtUyDFaVAgB5KG7kVqW+6dYCJ7FTQH4SxEE=
 github.com/go-webauthn/x v0.1.12 h1:RjQ5cvApzyU/xLCiP+rub0PE4HBZsLggbxGR5ZpUf/A=
 github.com/go-webauthn/x v0.1.12/go.mod h1:XlRcGkNH8PT45TfeJYc6gqpOtiOendHhVmnOxh+5yHs=
-github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
 github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -211,14 +261,32 @@ github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17w
 github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
 github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -227,6 +295,11 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
 github.com/google/go-tpm v0.9.1 h1:0pGc4X//bAlmZzMKf8iz6IsDo1nYTbYJ6FZN/rg4zdM=
 github.com/google/go-tpm v0.9.1/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
 github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -234,21 +307,36 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
 github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA=
 github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
+github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
+github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
 github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
 github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI0=
+github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M=
+github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ=
+github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg=
 github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
 github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/ipfs/boxo v0.12.0 h1:AXHg/1ONZdRQHQLgG5JHsSC3XoE4DjCAMgK+asZvUcQ=
@@ -265,8 +353,6 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
 github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
 github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
 github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
-github.com/jaevor/go-nanoid v1.3.0 h1:nD+iepesZS6pr3uOVf20vR9GdGgJW1HPaR46gtrxzkg=
-github.com/jaevor/go-nanoid v1.3.0/go.mod h1:SI+jFaPuddYkqkVQoNGHs81navCtH388TcrH0RqFKgY=
 github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
 github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
 github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
@@ -282,24 +368,30 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 h1:G+9t9cEtnC9jFiTxyptEKuNIAbiN5ZCQzX2a74lj3xg=
 github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c=
+github.com/kdomanski/iso9660 v0.4.0 h1:BPKKdcINz3m0MdjIMwS0wx1nofsOjxOq8TOr45WGHFg=
+github.com/kdomanski/iso9660 v0.4.0/go.mod h1:OxUSupHsO9ceI8lBLPJKWBTphLemjrCQY8LPXM7qSzU=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
 github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
+github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
 github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -308,7 +400,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/larksuite/oapi-sdk-go/v3 v3.3.1 h1:DLQQEgHUAGZB6RVlceB1f6A94O206exxW2RIMH+gMUc=
 github.com/larksuite/oapi-sdk-go/v3 v3.3.1/go.mod h1:ZEplY+kwuIrj/nqw5uSCINNATcH3KdxSN7y+UxYY5fI=
-github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
 github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
 github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
 github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
@@ -326,9 +417,10 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
 github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
+github.com/matoous/go-nanoid/v2 v2.1.0 h1:P64+dmq21hhWdtvZfEAofnvJULaRR1Yib0+PnU669bE=
+github.com/matoous/go-nanoid/v2 v2.1.0/go.mod h1:KlbGNQ+FhrUNIHUxZdL63t7tl4LaPkZNpUULS8H4uVM=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@@ -340,6 +432,10 @@ github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o
 github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/meilisearch/meilisearch-go v0.27.2 h1:3G21dJ5i208shnLPDsIEZ0L0Geg/5oeXABFV7nlK94k=
 github.com/meilisearch/meilisearch-go v0.27.2/go.mod h1:SxuSqDcPBIykjWz1PX+KzsYzArNLSCadQodWs8extS0=
+github.com/mholt/archives v0.1.0 h1:FacgJyrjiuyomTuNA92X5GyRBRZjE43Y/lrzKIlF35Q=
+github.com/mholt/archives v0.1.0/go.mod h1:j/Ire/jm42GN7h90F5kzj6hf6ZFzEH66de+hmjEKu+I=
+github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
+github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
 github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
 github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/minio/sio v0.4.0 h1:u4SWVEm5lXSqU42ZWawV0D9I5AZ5YMmo2RXpEQ/kRhc=
@@ -385,15 +481,17 @@ github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4
 github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
 github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg=
 github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
-github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831 h1:K3T3eu4h5aYIOzUtLjN08L4Qt4WGaJONMgcaD0ayBJQ=
-github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831/go.mod h1:lSHD4lC4zlMl+zcoysdJcd5KFzsWwOD8BJbyg1Ws9Ng=
+github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY=
+github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
+github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
+github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
+github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
+github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
 github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A=
-github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
 github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
 github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
-github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
-github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -413,6 +511,7 @@ github.com/pquerna/otp v1.4.0 h1:wZvl1TIVxKRThZIBiwOOHOGP/1+nZyWBil9Y2XNEDzg=
 github.com/pquerna/otp v1.4.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
 github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
 github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
 github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
 github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
@@ -426,13 +525,17 @@ github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Ny
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
 github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
 github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
+github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d h1:hrujxIzL1woJ7AwssoOcM/tq5JjjG2yYOc8odClEiXA=
+github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
+github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
+github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
 github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM=
 github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc=
 github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU=
@@ -441,16 +544,19 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt
 github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
 github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
 github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
 github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
+github.com/sorairolake/lzip-go v0.3.5 h1:ms5Xri9o1JBIWvOFAorYtUNik6HI3HgBTkISiqu0Cwg=
+github.com/sorairolake/lzip-go v0.3.5/go.mod h1:N0KYq5iWrMXI0ZEXKXaS9hCyOjZUQdBDEIbXfoUwbdk=
 github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
 github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
 github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
 github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -459,22 +565,25 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 h1:Jtcrb09q0AVWe3BGe8qtuuGxNSHWGkTWr43kHTJ+CpA=
 github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA=
+github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 h1:6Y51mutOvRGRx6KqyMNo//xk8B8o6zW9/RVmy1VamOs=
+github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543/go.mod h1:jpwqYA8KUVEvSUJHkCXsnBRJCSKP1BMa81QZ6kvRpow=
+github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw=
+github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY=
 github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
 github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
 github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
@@ -487,38 +596,49 @@ github.com/u2takey/ffmpeg-go v0.5.0 h1:r7d86XuL7uLWJ5mzSeQ03uvjfIhiJYvsRAJFCW4uk
 github.com/u2takey/ffmpeg-go v0.5.0/go.mod h1:ruZWkvC1FEiUNjmROowOAps3ZcWxEiOpFoHCvk97kGc=
 github.com/u2takey/go-utils v0.3.1 h1:TaQTgmEZZeDHQFYfd+AdUT1cT4QJgJn/XVPELhHw4ys=
 github.com/u2takey/go-utils v0.3.1/go.mod h1:6e+v5vEZ/6gu12w/DC2ixZdZtCrNokVxD0JUklcqdCs=
-github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
-github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
 github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
 github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
+github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/upyun/go-sdk/v3 v3.0.4 h1:2DCJa/Yi7/3ZybT9UCPATSzvU3wpPPxhXinNlb1Hi8Q=
 github.com/upyun/go-sdk/v3 v3.0.4/go.mod h1:P/SnuuwhrIgAVRd/ZpzDWqCsBAf/oHg7UggbAxyZa0E=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.37.1-0.20220607072126-8a320890c08d h1:xS9QTPgKl9ewGsAOPc+xW7DeStJDqYPfisDmeSCcbco=
 github.com/valyala/fasthttp v1.37.1-0.20220607072126-8a320890c08d/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I=
-github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
-github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
 github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
 github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXorZG0KzTxbp0Cr1n3FEegfmyd9br1k=
 github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xhofe/115-sdk-go v0.1.5 h1:2+E92l6AX0+ABAkrdmDa9PE5ONN7wVLCaKkK80zETOg=
+github.com/xhofe/115-sdk-go v0.1.5/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U=
 github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI=
 github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0=
-github.com/xhofe/tache v0.1.2 h1:pHrXlrWcbTb4G7hVUDW7Rc+YTUnLJvnLBrdktVE1Fqg=
-github.com/xhofe/tache v0.1.2/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ=
+github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM=
+github.com/xhofe/tache v0.1.5/go.mod h1:PYt6I/XUKliSg1uHlgsk6ha+le/f6PAvjUtFZAVl3a8=
 github.com/xhofe/wopan-sdk-go v0.1.3 h1:J58X6v+n25ewBZjb05pKOr7AWGohb+Rdll4CThGh6+A=
 github.com/xhofe/wopan-sdk-go v0.1.3/go.mod h1:dcY9yA28fnaoZPnXZiVTFSkcd7GnIPTpTIIlfSI5z5Q=
+github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
+github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
+github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9 h1:K8gF0eekWPEX+57l30ixxzGhHH/qscI3JCnuhbN6V4M=
+github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9/go.mod h1:9BnoKCcgJ/+SLhfAXj15352hTOuVmG5Gzo8xNRINfqI=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic=
+github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
 github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
 github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
 github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22 h1:X+lHsNTlbatQ1cErXIbtyrh+3MTWxqQFS+sBP/wpFXo=
 github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22/go.mod h1:1zGRDJd8zlG6P8azG96+uywfh6udYWwhOmUivw+xsuM=
 go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
 go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
@@ -529,34 +649,59 @@ go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGX
 go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
 go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
 go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
+go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
 gocv.io/x/gocv v0.25.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs=
 golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
 golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
 golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
 golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
 golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
 golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
-golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
-golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
-golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk=
 golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/image v0.19.0 h1:D9FX4QWkLfkeqaC62SonffIIuYdOk/UE2XKUBgRIBIQ=
 golang.org/x/image v0.19.0/go.mod h1:y0zrRqlQRWQ5PXaYCOMLTW2fpsxZ8Qh9I/ohnInJEys=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
@@ -564,27 +709,51 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
-golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
-golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
+golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
 golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -593,30 +762,38 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -626,25 +803,25 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod 
h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -652,23 +829,45 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time 
v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 
@@ -681,17 +880,49 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 
h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= @@ -713,7 +944,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo= @@ -725,8 +955,18 @@ gorm.io/driver/sqlite v1.5.6/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDa gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.11 h1:/Wfyg1B/je1hnDx3sMkX+gAlxrlZpn6X0BXRlwXlvHg= gorm.io/gorm v1.25.11/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= +resty.dev/v3 v3.0.0-beta.2 h1:xu4mGAdbCLuc3kbk7eddWfWm4JfhwDtdapwss5nCjnQ= +resty.dev/v3 v3.0.0-beta.2/go.mod h1:OgkqiPvTDtOuV4MGZuUDhwOpkY8enjOsjjMzeOHefy4= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= 
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/internal/archive/all.go b/internal/archive/all.go new file mode 100644 index 00000000..63206cb8 --- /dev/null +++ b/internal/archive/all.go @@ -0,0 +1,9 @@ +package archive + +import ( + _ "github.com/alist-org/alist/v3/internal/archive/archives" + _ "github.com/alist-org/alist/v3/internal/archive/iso9660" + _ "github.com/alist-org/alist/v3/internal/archive/rardecode" + _ "github.com/alist-org/alist/v3/internal/archive/sevenzip" + _ "github.com/alist-org/alist/v3/internal/archive/zip" +) diff --git a/internal/archive/archives/archives.go b/internal/archive/archives/archives.go new file mode 100644 index 00000000..0a42cd0c --- /dev/null +++ b/internal/archive/archives/archives.go @@ -0,0 +1,141 @@ +package archives + +import ( + "io" + "io/fs" + "os" + stdpath "path" + "strings" + + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/pkg/utils" +) + +type Archives struct { +} + +func (Archives) AcceptedExtensions() []string { + return []string{ + ".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar", + } +} + +func (Archives) AcceptedMultipartExtensions() map[string]tool.MultipartExtension { + return map[string]tool.MultipartExtension{} +} + +func (Archives) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + fsys, err := getFs(ss[0], args) + if err != nil { + return nil, err + } + files, err := fsys.ReadDir(".") + if err != nil { + return nil, filterPassword(err) + } + + tree := make([]model.ObjTree, 0, len(files)) + for _, file := range files { + info, err := file.Info() + if err != nil { + continue + } + tree = append(tree, &model.ObjectTree{Object: *toModelObj(info)}) + } + return &model.ArchiveMetaInfo{ + Comment: "", + Encrypted: false, + Tree: tree, + }, nil +} + +func (Archives) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + fsys, err := getFs(ss[0], args.ArchiveArgs) + if err != nil { + return nil, err + } + innerPath := strings.TrimPrefix(args.InnerPath, "/") + if innerPath == "" { + innerPath = "." 
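+		// io/fs addresses the filesystem root as "." rather than an empty path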
+ } + obj, err := fsys.ReadDir(innerPath) + if err != nil { + return nil, filterPassword(err) + } + return utils.SliceConvert(obj, func(src os.DirEntry) (model.Obj, error) { + info, err := src.Info() + if err != nil { + return nil, err + } + return toModelObj(info), nil + }) +} + +func (Archives) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + fsys, err := getFs(ss[0], args.ArchiveArgs) + if err != nil { + return nil, 0, err + } + file, err := fsys.Open(strings.TrimPrefix(args.InnerPath, "/")) + if err != nil { + return nil, 0, filterPassword(err) + } + stat, err := file.Stat() + if err != nil { + return nil, 0, filterPassword(err) + } + return file, stat.Size(), nil +} + +func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + fsys, err := getFs(ss[0], args.ArchiveArgs) + if err != nil { + return err + } + isDir := false + path := strings.TrimPrefix(args.InnerPath, "/") + if path == "" { + isDir = true + path = "." + } else { + stat, err := fsys.Stat(path) + if err != nil { + return filterPassword(err) + } + if stat.IsDir() { + isDir = true + outputPath = stdpath.Join(outputPath, stat.Name()) + err = os.Mkdir(outputPath, 0700) + if err != nil { + return filterPassword(err) + } + } + } + if isDir { + err = fs.WalkDir(fsys, path, func(p string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + relPath := strings.TrimPrefix(p, path+"/") + dstPath := stdpath.Join(outputPath, relPath) + if d.IsDir() { + err = os.MkdirAll(dstPath, 0700) + } else { + dir := stdpath.Dir(dstPath) + err = decompress(fsys, p, dir, func(_ float64) {}) + } + return err + }) + } else { + err = decompress(fsys, path, outputPath, up) + } + return filterPassword(err) +} + +var _ tool.Tool = (*Archives)(nil) + +func init() { + tool.RegisterTool(Archives{}) +} diff --git a/internal/archive/archives/utils.go b/internal/archive/archives/utils.go new file mode 100644 index 00000000..2f499a10 --- /dev/null +++ b/internal/archive/archives/utils.go @@ -0,0 +1,85 @@ +package archives + +import ( + "io" + fs2 "io/fs" + "os" + stdpath "path" + "strings" + + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/mholt/archives" +) + +func getFs(ss *stream.SeekableStream, args model.ArchiveArgs) (*archives.ArchiveFS, error) { + reader, err := stream.NewReadAtSeeker(ss, 0) + if err != nil { + return nil, err + } + if r, ok := reader.(*stream.RangeReadReadAtSeeker); ok { + r.InitHeadCache() + } + format, _, err := archives.Identify(ss.Ctx, ss.GetName(), reader) + if err != nil { + return nil, errs.UnknownArchiveFormat + } + extractor, ok := format.(archives.Extractor) + if !ok { + return nil, errs.UnknownArchiveFormat + } + switch f := format.(type) { + case archives.SevenZip: + f.Password = args.Password + case archives.Rar: + f.Password = args.Password + } + return &archives.ArchiveFS{ + Stream: io.NewSectionReader(reader, 0, ss.GetSize()), + Format: extractor, + Context: ss.Ctx, + }, nil +} + +func toModelObj(file os.FileInfo) *model.Object { + return &model.Object{ + Name: file.Name(), + Size: file.Size(), + Modified: file.ModTime(), + IsFolder: file.IsDir(), + } +} + +func filterPassword(err error) error { + if err != nil && strings.Contains(err.Error(), "password") { + return errs.WrongArchivePassword + } + return err +} + +func 
decompress(fsys fs2.FS, filePath, targetPath string, up model.UpdateProgress) error { + rc, err := fsys.Open(filePath) + if err != nil { + return err + } + defer rc.Close() + stat, err := rc.Stat() + if err != nil { + return err + } + f, err := os.OpenFile(stdpath.Join(targetPath, stat.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + defer f.Close() + _, err = utils.CopyWithBuffer(f, &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: rc, + Size: stat.Size(), + }, + UpdateProgress: up, + }) + return err +} diff --git a/internal/archive/iso9660/iso9660.go b/internal/archive/iso9660/iso9660.go new file mode 100644 index 00000000..be107d7b --- /dev/null +++ b/internal/archive/iso9660/iso9660.go @@ -0,0 +1,100 @@ +package iso9660 + +import ( + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/kdomanski/iso9660" + "io" + "os" + stdpath "path" +) + +type ISO9660 struct { +} + +func (ISO9660) AcceptedExtensions() []string { + return []string{".iso"} +} + +func (ISO9660) AcceptedMultipartExtensions() map[string]tool.MultipartExtension { + return map[string]tool.MultipartExtension{} +} + +func (ISO9660) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + return &model.ArchiveMetaInfo{ + Comment: "", + Encrypted: false, + }, nil +} + +func (ISO9660) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + img, err := getImage(ss[0]) + if err != nil { + return nil, err + } + dir, err := getObj(img, args.InnerPath) + if err != nil { + return nil, err + } + if !dir.IsDir() { + return nil, errs.NotFolder + } + children, err := dir.GetChildren() + if err != nil { + return nil, err + } + ret := make([]model.Obj, 0, len(children)) + for _, child := range children { + ret = append(ret, toModelObj(child)) + } + return ret, nil +} + +func (ISO9660) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + img, err := getImage(ss[0]) + if err != nil { + return nil, 0, err + } + obj, err := getObj(img, args.InnerPath) + if err != nil { + return nil, 0, err + } + if obj.IsDir() { + return nil, 0, errs.NotFile + } + return io.NopCloser(obj.Reader()), obj.Size(), nil +} + +func (ISO9660) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + img, err := getImage(ss[0]) + if err != nil { + return err + } + obj, err := getObj(img, args.InnerPath) + if err != nil { + return err + } + if obj.IsDir() { + if args.InnerPath != "/" { + outputPath = stdpath.Join(outputPath, obj.Name()) + if err = os.MkdirAll(outputPath, 0700); err != nil { + return err + } + } + var children []*iso9660.File + if children, err = obj.GetChildren(); err == nil { + err = decompressAll(children, outputPath) + } + } else { + err = decompress(obj, outputPath, up) + } + return err +} + +var _ tool.Tool = (*ISO9660)(nil) + +func init() { + tool.RegisterTool(ISO9660{}) +} diff --git a/internal/archive/iso9660/utils.go b/internal/archive/iso9660/utils.go new file mode 100644 index 00000000..0e4cfb1c --- /dev/null +++ b/internal/archive/iso9660/utils.go @@ -0,0 +1,101 @@ +package iso9660 + +import ( + "os" + stdpath "path" + "strings" + + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + 
"github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/kdomanski/iso9660" +) + +func getImage(ss *stream.SeekableStream) (*iso9660.Image, error) { + reader, err := stream.NewReadAtSeeker(ss, 0) + if err != nil { + return nil, err + } + return iso9660.OpenImage(reader) +} + +func getObj(img *iso9660.Image, path string) (*iso9660.File, error) { + obj, err := img.RootDir() + if err != nil { + return nil, err + } + if path == "/" { + return obj, nil + } + paths := strings.Split(strings.TrimPrefix(path, "/"), "/") + for _, p := range paths { + if !obj.IsDir() { + return nil, errs.ObjectNotFound + } + children, err := obj.GetChildren() + if err != nil { + return nil, err + } + exist := false + for _, child := range children { + if child.Name() == p { + obj = child + exist = true + break + } + } + if !exist { + return nil, errs.ObjectNotFound + } + } + return obj, nil +} + +func toModelObj(file *iso9660.File) model.Obj { + return &model.Object{ + Name: file.Name(), + Size: file.Size(), + Modified: file.ModTime(), + IsFolder: file.IsDir(), + } +} + +func decompress(f *iso9660.File, path string, up model.UpdateProgress) error { + file, err := os.OpenFile(stdpath.Join(path, f.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + defer file.Close() + _, err = utils.CopyWithBuffer(file, &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: f.Reader(), + Size: f.Size(), + }, + UpdateProgress: up, + }) + return err +} + +func decompressAll(children []*iso9660.File, path string) error { + for _, child := range children { + if child.IsDir() { + nextChildren, err := child.GetChildren() + if err != nil { + return err + } + nextPath := stdpath.Join(path, child.Name()) + if err = os.MkdirAll(nextPath, 0700); err != nil { + return err + } + if err = decompressAll(nextChildren, nextPath); err != nil { + return err + } + } else { + if err := decompress(child, path, func(_ float64) {}); err != nil { + return err + } + } + } + return nil +} diff --git a/internal/archive/rardecode/rardecode.go b/internal/archive/rardecode/rardecode.go new file mode 100644 index 00000000..cd31d1a4 --- /dev/null +++ b/internal/archive/rardecode/rardecode.go @@ -0,0 +1,140 @@ +package rardecode + +import ( + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/nwaples/rardecode/v2" + "io" + "os" + stdpath "path" + "strings" +) + +type RarDecoder struct{} + +func (RarDecoder) AcceptedExtensions() []string { + return []string{".rar"} +} + +func (RarDecoder) AcceptedMultipartExtensions() map[string]tool.MultipartExtension { + return map[string]tool.MultipartExtension{ + ".part1.rar": {".part%d.rar", 2}, + } +} + +func (RarDecoder) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + l, err := list(ss, args.Password) + if err != nil { + return nil, err + } + _, tree := tool.GenerateMetaTreeFromFolderTraversal(l) + return &model.ArchiveMetaInfo{ + Comment: "", + Encrypted: false, + Tree: tree, + }, nil +} + +func (RarDecoder) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + return nil, errs.NotSupport +} + +func (RarDecoder) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + reader, err := getReader(ss, args.Password) + if err != nil { + return 
nil, 0, err + } + innerPath := strings.TrimPrefix(args.InnerPath, "/") + for { + var header *rardecode.FileHeader + header, err = reader.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, 0, err + } + if header.Name == innerPath { + if header.IsDir { + break + } + return io.NopCloser(reader), header.UnPackedSize, nil + } + } + return nil, 0, errs.ObjectNotFound +} + +func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + reader, err := getReader(ss, args.Password) + if err != nil { + return err + } + if args.InnerPath == "/" { + for { + var header *rardecode.FileHeader + header, err = reader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + name := header.Name + if header.IsDir { + name = name + "/" + } + err = decompress(reader, header, name, outputPath) + if err != nil { + return err + } + } + } else { + innerPath := strings.TrimPrefix(args.InnerPath, "/") + innerBase := stdpath.Base(innerPath) + createdBaseDir := false + for { + var header *rardecode.FileHeader + header, err = reader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + name := header.Name + if header.IsDir { + name = name + "/" + } + if name == innerPath { + err = _decompress(reader, header, outputPath, up) + if err != nil { + return err + } + break + } else if strings.HasPrefix(name, innerPath+"/") { + targetPath := stdpath.Join(outputPath, innerBase) + if !createdBaseDir { + err = os.Mkdir(targetPath, 0700) + if err != nil { + return err + } + createdBaseDir = true + } + restPath := strings.TrimPrefix(name, innerPath+"/") + err = decompress(reader, header, restPath, targetPath) + if err != nil { + return err + } + } + } + } + return nil +} + +var _ tool.Tool = (*RarDecoder)(nil) + +func init() { + tool.RegisterTool(RarDecoder{}) +} diff --git a/internal/archive/rardecode/utils.go b/internal/archive/rardecode/utils.go new file mode 100644 index 00000000..5790ec58 --- /dev/null +++ b/internal/archive/rardecode/utils.go @@ -0,0 +1,225 @@ +package rardecode + +import ( + "fmt" + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/nwaples/rardecode/v2" + "io" + "io/fs" + "os" + stdpath "path" + "sort" + "strings" + "time" +) + +type VolumeFile struct { + stream.SStreamReadAtSeeker + name string +} + +func (v *VolumeFile) Name() string { + return v.name +} + +func (v *VolumeFile) Size() int64 { + return v.SStreamReadAtSeeker.GetRawStream().GetSize() +} + +func (v *VolumeFile) Mode() fs.FileMode { + return 0644 +} + +func (v *VolumeFile) ModTime() time.Time { + return v.SStreamReadAtSeeker.GetRawStream().ModTime() +} + +func (v *VolumeFile) IsDir() bool { + return false +} + +func (v *VolumeFile) Sys() any { + return nil +} + +func (v *VolumeFile) Stat() (fs.FileInfo, error) { + return v, nil +} + +func (v *VolumeFile) Close() error { + return nil +} + +type VolumeFs struct { + parts map[string]*VolumeFile +} + +func (v *VolumeFs) Open(name string) (fs.File, error) { + file, ok := v.parts[name] + if !ok { + return nil, fs.ErrNotExist + } + return file, nil +} + +func makeOpts(ss []*stream.SeekableStream) (string, rardecode.Option, error) { + if len(ss) == 1 { + reader, err := stream.NewReadAtSeeker(ss[0], 0) + if err != nil { + return "", nil, err + } + fileName := "file.rar" + fsys := &VolumeFs{parts: 
map[string]*VolumeFile{
+			fileName: {SStreamReadAtSeeker: reader, name: fileName},
+		}}
+		return fileName, rardecode.FileSystem(fsys), nil
+	} else {
+		parts := make(map[string]*VolumeFile, len(ss))
+		for i, s := range ss {
+			reader, err := stream.NewReadAtSeeker(s, 0)
+			if err != nil {
+				return "", nil, err
+			}
+			fileName := fmt.Sprintf("file.part%d.rar", i+1)
+			parts[fileName] = &VolumeFile{SStreamReadAtSeeker: reader, name: fileName}
+		}
+		return "file.part1.rar", rardecode.FileSystem(&VolumeFs{parts: parts}), nil
+	}
+}
+
+type WrapReader struct {
+	files []*rardecode.File
+}
+
+func (r *WrapReader) Files() []tool.SubFile {
+	ret := make([]tool.SubFile, 0, len(r.files))
+	for _, f := range r.files {
+		ret = append(ret, &WrapFile{File: f})
+	}
+	return ret
+}
+
+type WrapFile struct {
+	*rardecode.File
+}
+
+func (f *WrapFile) Name() string {
+	if f.File.IsDir {
+		return f.File.Name + "/"
+	}
+	return f.File.Name
+}
+
+func (f *WrapFile) FileInfo() fs.FileInfo {
+	return &WrapFileInfo{File: f.File}
+}
+
+type WrapFileInfo struct {
+	*rardecode.File
+}
+
+func (f *WrapFileInfo) Name() string {
+	return stdpath.Base(f.File.Name)
+}
+
+func (f *WrapFileInfo) Size() int64 {
+	return f.File.UnPackedSize
+}
+
+func (f *WrapFileInfo) ModTime() time.Time {
+	return f.File.ModificationTime
+}
+
+func (f *WrapFileInfo) IsDir() bool {
+	return f.File.IsDir
+}
+
+func (f *WrapFileInfo) Sys() any {
+	return nil
+}
+
+func list(ss []*stream.SeekableStream, password string) (*WrapReader, error) {
+	fileName, fsOpt, err := makeOpts(ss)
+	if err != nil {
+		return nil, err
+	}
+	opts := []rardecode.Option{fsOpt}
+	if password != "" {
+		opts = append(opts, rardecode.Password(password))
+	}
+	files, err := rardecode.List(fileName, opts...)
+	if err != nil {
+		return nil, filterPassword(err)
+	}
+	// rardecode does not guarantee that parent directories are listed before
+	// their children. A parent path is always shorter than its child paths,
+	// so sorting by name length ensures parents come first.
+	sort.Slice(files, func(i, j int) bool {
+		return len(files[i].Name) < len(files[j].Name)
+	})
+	return &WrapReader{files: files}, nil
+}
+
+func getReader(ss []*stream.SeekableStream, password string) (*rardecode.Reader, error) {
+	fileName, fsOpt, err := makeOpts(ss)
+	if err != nil {
+		return nil, err
+	}
+	opts := []rardecode.Option{fsOpt}
+	if password != "" {
+		opts = append(opts, rardecode.Password(password))
+	}
+	rc, err := rardecode.OpenReader(fileName, opts...)
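+	// rc is registered with the first stream's closer list below, so the RAR volumes are released together with the stream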
+ if err != nil { + return nil, filterPassword(err) + } + ss[0].Closers.Add(rc) + return &rc.Reader, nil +} + +func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath, outputPath string) error { + targetPath := outputPath + dir, base := stdpath.Split(filePath) + if dir != "" { + targetPath = stdpath.Join(targetPath, dir) + err := os.MkdirAll(targetPath, 0700) + if err != nil { + return err + } + } + if base != "" { + err := _decompress(reader, header, targetPath, func(_ float64) {}) + if err != nil { + return err + } + } + return nil +} + +func _decompress(reader *rardecode.Reader, header *rardecode.FileHeader, targetPath string, up model.UpdateProgress) error { + f, err := os.OpenFile(stdpath.Join(targetPath, stdpath.Base(header.Name)), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + _, err = io.Copy(f, &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: reader, + Size: header.UnPackedSize, + }, + UpdateProgress: up, + }) + if err != nil { + return err + } + return nil +} + +func filterPassword(err error) error { + if err != nil && strings.Contains(err.Error(), "password") { + return errs.WrongArchivePassword + } + return err +} diff --git a/internal/archive/sevenzip/sevenzip.go b/internal/archive/sevenzip/sevenzip.go new file mode 100644 index 00000000..28169966 --- /dev/null +++ b/internal/archive/sevenzip/sevenzip.go @@ -0,0 +1,72 @@ +package sevenzip + +import ( + "io" + "strings" + + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" +) + +type SevenZip struct{} + +func (SevenZip) AcceptedExtensions() []string { + return []string{".7z"} +} + +func (SevenZip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension { + return map[string]tool.MultipartExtension{ + ".7z.001": {".7z.%.3d", 2}, + } +} + +func (SevenZip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + reader, err := getReader(ss, args.Password) + if err != nil { + return nil, err + } + _, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: reader}) + return &model.ArchiveMetaInfo{ + Comment: "", + Encrypted: args.Password != "", + Tree: tree, + }, nil +} + +func (SevenZip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + return nil, errs.NotSupport +} + +func (SevenZip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + reader, err := getReader(ss, args.Password) + if err != nil { + return nil, 0, err + } + innerPath := strings.TrimPrefix(args.InnerPath, "/") + for _, file := range reader.File { + if file.Name == innerPath { + r, e := file.Open() + if e != nil { + return nil, 0, e + } + return r, file.FileInfo().Size(), nil + } + } + return nil, 0, errs.ObjectNotFound +} + +func (SevenZip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + reader, err := getReader(ss, args.Password) + if err != nil { + return err + } + return tool.DecompressFromFolderTraversal(&WrapReader{Reader: reader}, outputPath, args, up) +} + +var _ tool.Tool = (*SevenZip)(nil) + +func init() { + tool.RegisterTool(SevenZip{}) +} diff --git a/internal/archive/sevenzip/utils.go b/internal/archive/sevenzip/utils.go new file mode 100644 index 00000000..624ba187 --- 
/dev/null
+++ b/internal/archive/sevenzip/utils.go
@@ -0,0 +1,61 @@
+package sevenzip
+
+import (
+	"errors"
+	"github.com/alist-org/alist/v3/internal/archive/tool"
+	"github.com/alist-org/alist/v3/internal/errs"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"github.com/bodgit/sevenzip"
+	"io"
+	"io/fs"
+)
+
+type WrapReader struct {
+	Reader *sevenzip.Reader
+}
+
+func (r *WrapReader) Files() []tool.SubFile {
+	ret := make([]tool.SubFile, 0, len(r.Reader.File))
+	for _, f := range r.Reader.File {
+		ret = append(ret, &WrapFile{f: f})
+	}
+	return ret
+}
+
+type WrapFile struct {
+	f *sevenzip.File
+}
+
+func (f *WrapFile) Name() string {
+	return f.f.Name
+}
+
+func (f *WrapFile) FileInfo() fs.FileInfo {
+	return f.f.FileInfo()
+}
+
+func (f *WrapFile) Open() (io.ReadCloser, error) {
+	return f.f.Open()
+}
+
+func getReader(ss []*stream.SeekableStream, password string) (*sevenzip.Reader, error) {
+	readerAt, err := stream.NewMultiReaderAt(ss)
+	if err != nil {
+		return nil, err
+	}
+	sr, err := sevenzip.NewReaderWithPassword(readerAt, readerAt.Size(), password)
+	if err != nil {
+		return nil, filterPassword(err)
+	}
+	return sr, nil
+}
+
+func filterPassword(err error) error {
+	if err != nil {
+		var e *sevenzip.ReadError
+		if errors.As(err, &e) && e.Encrypted {
+			return errs.WrongArchivePassword
+		}
+	}
+	return err
+}
diff --git a/internal/archive/tool/base.go b/internal/archive/tool/base.go
new file mode 100644
index 00000000..8f5b10d9
--- /dev/null
+++ b/internal/archive/tool/base.go
@@ -0,0 +1,21 @@
+package tool
+
+import (
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"io"
+)
+
+type MultipartExtension struct {
+	PartFileFormat  string
+	SecondPartIndex int
+}
+
+type Tool interface {
+	AcceptedExtensions() []string
+	AcceptedMultipartExtensions() map[string]MultipartExtension
+	GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error)
+	List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error)
+	Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error)
+	Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error
+}
diff --git a/internal/archive/tool/helper.go b/internal/archive/tool/helper.go
new file mode 100644
index 00000000..20da3446
--- /dev/null
+++ b/internal/archive/tool/helper.go
@@ -0,0 +1,204 @@
+package tool
+
+import (
+	"io"
+	"io/fs"
+	"os"
+	stdpath "path"
+	"strings"
+
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
+)
+
+type SubFile interface {
+	Name() string
+	FileInfo() fs.FileInfo
+	Open() (io.ReadCloser, error)
+}
+
+type CanEncryptSubFile interface {
+	IsEncrypted() bool
+	SetPassword(password string)
+}
+
+type ArchiveReader interface {
+	Files() []SubFile
+}
+
+func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree) {
+	encrypted := false
+	dirMap := make(map[string]*model.ObjectTree)
+	for _, file := range r.Files() {
+		if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() {
+			encrypted = true
+		}
+
+		name := strings.TrimPrefix(file.Name(), "/")
+		var dir string
+		var dirObj *model.ObjectTree
+		isNewFolder := false
+		if !file.FileInfo().IsDir() {
+			// add the file to its containing folder first
+			dir = stdpath.Dir(name)
+			dirObj = dirMap[dir]
+			if dirObj == nil {
+				isNewFolder = dir != "."
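+				// folder seen for the first time: create its node here and
+				// link it into its parent chain in the isNewFolder block below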
+				dirObj = &model.ObjectTree{}
+				dirObj.IsFolder = true
+				dirObj.Name = stdpath.Base(dir)
+				dirObj.Modified = file.FileInfo().ModTime()
+				dirMap[dir] = dirObj
+			}
+			dirObj.Children = append(
+				dirObj.Children, &model.ObjectTree{
+					Object: *MakeModelObj(file.FileInfo()),
+				},
+			)
+		} else {
+			dir = strings.TrimSuffix(name, "/")
+			dirObj = dirMap[dir]
+			if dirObj == nil {
+				isNewFolder = dir != "."
+				dirObj = &model.ObjectTree{}
+				dirMap[dir] = dirObj
+			}
+			dirObj.IsFolder = true
+			dirObj.Name = stdpath.Base(dir)
+			dirObj.Modified = file.FileInfo().ModTime()
+		}
+		if isNewFolder {
+			// attach the folder to its parent folder; some archives record
+			// only file paths without explicit folder entries, so loop
+			// upward and create every missing parent folder on the way
+			parentDir := stdpath.Dir(dir)
+			for {
+				parentDirObj := dirMap[parentDir]
+				if parentDirObj == nil {
+					parentDirObj = &model.ObjectTree{}
+					if parentDir != "." {
+						parentDirObj.IsFolder = true
+						parentDirObj.Name = stdpath.Base(parentDir)
+						parentDirObj.Modified = file.FileInfo().ModTime()
+					}
+					dirMap[parentDir] = parentDirObj
+				}
+				parentDirObj.Children = append(parentDirObj.Children, dirObj)
+
+				parentDir = stdpath.Dir(parentDir)
+				if dirMap[parentDir] != nil {
+					break
+				}
+				dirObj = parentDirObj
+			}
+		}
+	}
+	if len(dirMap) > 0 {
+		return encrypted, dirMap["."].GetChildren()
+	} else {
+		return encrypted, nil
+	}
+}
+
+func MakeModelObj(file os.FileInfo) *model.Object {
+	return &model.Object{
+		Name:     file.Name(),
+		Size:     file.Size(),
+		Modified: file.ModTime(),
+		IsFolder: file.IsDir(),
+	}
+}
+
+type WrapFileInfo struct {
+	model.Obj
+}
+
+func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+	var err error
+	files := r.Files()
+	if args.InnerPath == "/" {
+		for i, file := range files {
+			name := file.Name()
+			err = decompress(file, name, outputPath, args.Password)
+			if err != nil {
+				return err
+			}
+			up(float64(i+1) * 100.0 / float64(len(files)))
+		}
+	} else {
+		innerPath := strings.TrimPrefix(args.InnerPath, "/")
+		innerBase := stdpath.Base(innerPath)
+		createdBaseDir := false
+		for _, file := range files {
+			name := file.Name()
+			if name == innerPath {
+				err = _decompress(file, outputPath, args.Password, up)
+				if err != nil {
+					return err
+				}
+				break
+			} else if strings.HasPrefix(name, innerPath+"/") {
+				targetPath := stdpath.Join(outputPath, innerBase)
+				if !createdBaseDir {
+					err = os.Mkdir(targetPath, 0700)
+					if err != nil {
+						return err
+					}
+					createdBaseDir = true
+				}
+				restPath := strings.TrimPrefix(name, innerPath+"/")
+				err = decompress(file, restPath, targetPath, args.Password)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func decompress(file SubFile, filePath, outputPath, password string) error {
+	targetPath := outputPath
+	dir, base := stdpath.Split(filePath)
+	if dir != "" {
+		targetPath = stdpath.Join(targetPath, dir)
+		err := os.MkdirAll(targetPath, 0700)
+		if err != nil {
+			return err
+		}
+	}
+	if base != "" {
+		err := _decompress(file, targetPath, password, func(_ float64) {})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func _decompress(file SubFile, targetPath, password string, up model.UpdateProgress) error {
+	if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() {
+		encrypt.SetPassword(password)
+	}
+	rc, err := file.Open()
+	if err != nil {
+		return err
+	}
+	defer func() { _ = rc.Close() }()
+	f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
+	if err != nil {
+		return err
+	}
+	defer func() { _ = f.Close() }()
+	_, err =
io.Copy(f, &stream.ReaderUpdatingProgress{ + Reader: &stream.SimpleReaderWithSize{ + Reader: rc, + Size: file.FileInfo().Size(), + }, + UpdateProgress: up, + }) + if err != nil { + return err + } + return nil +} diff --git a/internal/archive/tool/utils.go b/internal/archive/tool/utils.go new file mode 100644 index 00000000..aa92cb1d --- /dev/null +++ b/internal/archive/tool/utils.go @@ -0,0 +1,32 @@ +package tool + +import ( + "github.com/alist-org/alist/v3/internal/errs" +) + +var ( + Tools = make(map[string]Tool) + MultipartExtensions = make(map[string]MultipartExtension) +) + +func RegisterTool(tool Tool) { + for _, ext := range tool.AcceptedExtensions() { + Tools[ext] = tool + } + for mainFile, ext := range tool.AcceptedMultipartExtensions() { + MultipartExtensions[mainFile] = ext + Tools[mainFile] = tool + } +} + +func GetArchiveTool(ext string) (*MultipartExtension, Tool, error) { + t, ok := Tools[ext] + if !ok { + return nil, nil, errs.UnknownArchiveFormat + } + partExt, ok := MultipartExtensions[ext] + if !ok { + return nil, t, nil + } + return &partExt, t, nil +} diff --git a/internal/archive/zip/utils.go b/internal/archive/zip/utils.go new file mode 100644 index 00000000..59f4ed51 --- /dev/null +++ b/internal/archive/zip/utils.go @@ -0,0 +1,195 @@ +package zip + +import ( + "bytes" + "io" + "io/fs" + stdpath "path" + "strings" + + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/saintfish/chardet" + "github.com/yeka/zip" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/japanese" + "golang.org/x/text/encoding/korean" + "golang.org/x/text/encoding/simplifiedchinese" + "golang.org/x/text/encoding/traditionalchinese" + "golang.org/x/text/encoding/unicode" + "golang.org/x/text/encoding/unicode/utf32" + "golang.org/x/text/transform" +) + +type WrapReader struct { + Reader *zip.Reader +} + +func (r *WrapReader) Files() []tool.SubFile { + ret := make([]tool.SubFile, 0, len(r.Reader.File)) + for _, f := range r.Reader.File { + ret = append(ret, &WrapFile{f: f}) + } + return ret +} + +type WrapFileInfo struct { + fs.FileInfo +} + +func (f *WrapFileInfo) Name() string { + return decodeName(f.FileInfo.Name()) +} + +type WrapFile struct { + f *zip.File +} + +func (f *WrapFile) Name() string { + return decodeName(f.f.Name) +} + +func (f *WrapFile) FileInfo() fs.FileInfo { + return &WrapFileInfo{FileInfo: f.f.FileInfo()} +} + +func (f *WrapFile) Open() (io.ReadCloser, error) { + return f.f.Open() +} + +func (f *WrapFile) IsEncrypted() bool { + return f.f.IsEncrypted() +} + +func (f *WrapFile) SetPassword(password string) { + f.f.SetPassword(password) +} + +func getReader(ss []*stream.SeekableStream) (*zip.Reader, error) { + if len(ss) > 1 && stdpath.Ext(ss[1].GetName()) == ".z01" { + // FIXME: Incorrect parsing method for standard multipart zip format + ss = append(ss[1:], ss[0]) + } + reader, err := stream.NewMultiReaderAt(ss) + if err != nil { + return nil, err + } + return zip.NewReader(reader, reader.Size()) +} + +func filterPassword(err error) error { + if err != nil && strings.Contains(err.Error(), "password") { + return errs.WrongArchivePassword + } + return err +} + +func decodeName(name string) string { + b := []byte(name) + detector := chardet.NewTextDetector() + results, err := detector.DetectAll(b) + if err != nil { + return name + } + var ce, re, enc encoding.Encoding + for _, r := range results { + if r.Confidence > 
30 { + ce = getCommonEncoding(r.Charset) + if ce != nil { + break + } + } + if re == nil { + re = getEncoding(r.Charset) + } + } + if ce != nil { + enc = ce + } else if re != nil { + enc = re + } else { + return name + } + i := bytes.NewReader(b) + decoder := transform.NewReader(i, enc.NewDecoder()) + content, _ := io.ReadAll(decoder) + return string(content) +} + +func getCommonEncoding(name string) (enc encoding.Encoding) { + switch name { + case "UTF-8": + enc = unicode.UTF8 + case "UTF-16LE": + enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM) + case "Shift_JIS": + enc = japanese.ShiftJIS + case "GB-18030": + enc = simplifiedchinese.GB18030 + case "EUC-KR": + enc = korean.EUCKR + case "Big5": + enc = traditionalchinese.Big5 + default: + enc = nil + } + return +} + +func getEncoding(name string) (enc encoding.Encoding) { + switch name { + case "UTF-8": + enc = unicode.UTF8 + case "UTF-16BE": + enc = unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM) + case "UTF-16LE": + enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM) + case "UTF-32BE": + enc = utf32.UTF32(utf32.BigEndian, utf32.IgnoreBOM) + case "UTF-32LE": + enc = utf32.UTF32(utf32.LittleEndian, utf32.IgnoreBOM) + case "ISO-8859-1": + enc = charmap.ISO8859_1 + case "ISO-8859-2": + enc = charmap.ISO8859_2 + case "ISO-8859-3": + enc = charmap.ISO8859_3 + case "ISO-8859-4": + enc = charmap.ISO8859_4 + case "ISO-8859-5": + enc = charmap.ISO8859_5 + case "ISO-8859-6": + enc = charmap.ISO8859_6 + case "ISO-8859-7": + enc = charmap.ISO8859_7 + case "ISO-8859-8": + enc = charmap.ISO8859_8 + case "ISO-8859-8-I": + enc = charmap.ISO8859_8I + case "ISO-8859-9": + enc = charmap.ISO8859_9 + case "windows-1251": + enc = charmap.Windows1251 + case "windows-1256": + enc = charmap.Windows1256 + case "KOI8-R": + enc = charmap.KOI8R + case "Shift_JIS": + enc = japanese.ShiftJIS + case "GB-18030": + enc = simplifiedchinese.GB18030 + case "EUC-JP": + enc = japanese.EUCJP + case "EUC-KR": + enc = korean.EUCKR + case "Big5": + enc = traditionalchinese.Big5 + case "ISO-2022-JP": + enc = japanese.ISO2022JP + default: + enc = nil + } + return +} diff --git a/internal/archive/zip/zip.go b/internal/archive/zip/zip.go new file mode 100644 index 00000000..6e23570c --- /dev/null +++ b/internal/archive/zip/zip.go @@ -0,0 +1,132 @@ +package zip + +import ( + "io" + stdpath "path" + "strings" + + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" +) + +type Zip struct { +} + +func (Zip) AcceptedExtensions() []string { + return []string{} +} + +func (Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension { + return map[string]tool.MultipartExtension{ + ".zip": {".z%.2d", 1}, + ".zip.001": {".zip.%.3d", 2}, + } +} + +func (Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) { + zipReader, err := getReader(ss) + if err != nil { + return nil, err + } + encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: zipReader}) + return &model.ArchiveMetaInfo{ + Comment: zipReader.Comment, + Encrypted: encrypted, + Tree: tree, + }, nil +} + +func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) { + zipReader, err := getReader(ss) + if err != nil { + return nil, err + } + if args.InnerPath == "/" { + ret := make([]model.Obj, 0) + passVerified := false + var dir *model.Object + for _, file := 
range zipReader.File { + if !passVerified && file.IsEncrypted() { + file.SetPassword(args.Password) + rc, e := file.Open() + if e != nil { + return nil, filterPassword(e) + } + _ = rc.Close() + passVerified = true + } + name := strings.TrimSuffix(decodeName(file.Name), "/") + if strings.Contains(name, "/") { + // some archives do not store an entry for the top-level folder + strs := strings.Split(name, "/") + if dir == nil && len(strs) == 2 { + dir = &model.Object{ + Name: strs[0], + Modified: ss[0].ModTime(), + IsFolder: true, + } + } + continue + } + ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()})) + } + if len(ret) == 0 && dir != nil { + ret = append(ret, dir) + } + return ret, nil + } else { + innerPath := strings.TrimPrefix(args.InnerPath, "/") + "/" + ret := make([]model.Obj, 0) + exist := false + for _, file := range zipReader.File { + name := decodeName(file.Name) + dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/" + if dir != innerPath { + continue + } + exist = true + ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()})) + } + if !exist { + return nil, errs.ObjectNotFound + } + return ret, nil + } +} + +func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + zipReader, err := getReader(ss) + if err != nil { + return nil, 0, err + } + innerPath := strings.TrimPrefix(args.InnerPath, "/") + for _, file := range zipReader.File { + if decodeName(file.Name) == innerPath { + if file.IsEncrypted() { + file.SetPassword(args.Password) + } + r, e := file.Open() + if e != nil { + return nil, 0, e + } + return r, file.FileInfo().Size(), nil + } + } + return nil, 0, errs.ObjectNotFound +} + +func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error { + zipReader, err := getReader(ss) + if err != nil { + return err + } + return tool.DecompressFromFolderTraversal(&WrapReader{Reader: zipReader}, outputPath, args, up) +} + +var _ tool.Tool = (*Zip)(nil) + +func init() { + tool.RegisterTool(Zip{}) +} diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go index 27174c23..db3e2094 100644 --- a/internal/bootstrap/config.go +++ b/internal/bootstrap/config.go @@ -9,6 +9,7 @@ import ( "github.com/alist-org/alist/v3/cmd/flags" "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/net" "github.com/alist-org/alist/v3/pkg/utils" "github.com/caarlos0/env/v9" log "github.com/sirupsen/logrus" @@ -34,6 +35,8 @@ func InitConfig() { log.Fatalf("failed to create config file: %+v", err) } conf.Conf = conf.DefaultConfig() + LastLaunchedVersion = conf.Version + conf.Conf.LastLaunchedVersion = conf.Version if !utils.WriteJsonToFile(configPath, conf.Conf) { log.Fatalf("failed to create default config file") } @@ -47,6 +50,10 @@ func InitConfig() { if err != nil { log.Fatalf("load config error: %+v", err) } + LastLaunchedVersion = conf.Conf.LastLaunchedVersion + if strings.HasPrefix(conf.Version, "v") || LastLaunchedVersion == "" { + conf.Conf.LastLaunchedVersion = conf.Version + } // update config.json struct confBody, err := utils.Json.MarshalIndent(conf.Conf, "", " ") if err != nil { @@ -57,6 +64,9 @@ func InitConfig() { log.Fatalf("update config struct error: %+v", err) } } + if conf.Conf.MaxConcurrency > 0 { + net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: conf.Conf.MaxConcurrency} + } if !conf.Conf.Force { confFromEnv() } diff --git a/internal/bootstrap/data/setting.go 
b/internal/bootstrap/data/setting.go index 920a7a2d..407a5c64 100644 --- a/internal/bootstrap/data/setting.go +++ b/internal/bootstrap/data/setting.go @@ -1,8 +1,11 @@ package data import ( + "strconv" + "github.com/alist-org/alist/v3/cmd/flags" "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/db" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/offline_download/tool" "github.com/alist-org/alist/v3/internal/op" @@ -21,17 +24,19 @@ func initSettings() { if err != nil { utils.Log.Fatalf("failed get settings: %+v", err) } - for i := range settings { - if !isActive(settings[i].Key) && settings[i].Flag != model.DEPRECATED { - settings[i].Flag = model.DEPRECATED - err = op.SaveSettingItem(&settings[i]) + settingMap := map[string]*model.SettingItem{} + for _, v := range settings { + if !isActive(v.Key) && v.Flag != model.DEPRECATED { + v.Flag = model.DEPRECATED + err = op.SaveSettingItem(&v) if err != nil { utils.Log.Fatalf("failed save setting: %+v", err) } } + settingMap[v.Key] = &v } - // create or save setting + save := false for i := range initialSettingItems { item := &initialSettingItems[i] item.Index = uint(i) @@ -39,26 +44,33 @@ func initSettings() { item.PreDefault = item.Value } // err - stored, err := op.GetSettingItemByKey(item.Key) - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - utils.Log.Fatalf("failed get setting: %+v", err) - continue + stored, ok := settingMap[item.Key] + if !ok { + stored, err = op.GetSettingItemByKey(item.Key) + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + utils.Log.Fatalf("failed get setting: %+v", err) + continue + } } - // save if stored != nil && item.Key != conf.VERSION && stored.Value != item.PreDefault { item.Value = stored.Value } + _, err = op.HandleSettingItemHook(item) + if err != nil { + utils.Log.Errorf("failed to execute hook on %s: %+v", item.Key, err) + continue + } + // save if stored == nil || *item != *stored { - err = op.SaveSettingItem(item) - if err != nil { - utils.Log.Fatalf("failed save setting: %+v", err) - } + save = true + } + } + if save { + err = db.SaveSettingItems(initialSettingItems) + if err != nil { + utils.Log.Fatalf("failed save setting: %+v", err) } else { - // Not save so needs to execute hook - _, err = op.HandleSettingItemHook(item) - if err != nil { - utils.Log.Errorf("failed to execute hook on %s: %+v", item.Key, err) - } + op.SettingCacheUpdate() } } } @@ -104,7 +116,7 @@ func InitialSettings() []model.SettingItem { {Key: conf.VideoTypes, Value: "mp4,mkv,avi,mov,rmvb,webm,flv,m3u8", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, {Key: conf.ImageTypes, Value: "jpg,tiff,jpeg,png,gif,bmp,svg,ico,swf,webp", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, //{Key: conf.OfficeTypes, Value: "doc,docx,xls,xlsx,ppt,pptx", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, - {Key: conf.ProxyTypes, Value: "m3u8", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, + {Key: conf.ProxyTypes, Value: "m3u8,url", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, {Key: conf.ProxyIgnoreHeaders, Value: "authorization,referer", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, {Key: "external_previews", Value: `{}`, Type: conf.TypeText, Group: model.PREVIEW}, {Key: "iframe_previews", Value: `{ @@ -129,6 +141,9 @@ func InitialSettings() []model.SettingItem { {Key: "audio_cover", Value: "https://jsd.nn.ci/gh/alist-org/logo@main/logo.svg", Type: 
conf.TypeString, Group: model.PREVIEW}, {Key: conf.AudioAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, {Key: conf.VideoAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, + {Key: conf.PreviewArchivesByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, + {Key: conf.ReadMeAutoRender, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, + {Key: conf.FilterReadMeScripts, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW}, // global settings {Key: conf.HideFiles, Value: "/\\/README.md/i", Type: conf.TypeText, Group: model.GLOBAL}, {Key: "package_download", Value: "true", Type: conf.TypeBool, Group: model.GLOBAL}, @@ -164,6 +179,7 @@ func InitialSettings() []model.SettingItem { {Key: conf.SSOApplicationName, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE}, {Key: conf.SSOEndpointName, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE}, {Key: conf.SSOJwtPublicKey, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE}, + {Key: conf.SSOExtraScopes, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE}, {Key: conf.SSOAutoRegister, Value: "false", Type: conf.TypeBool, Group: model.SSO, Flag: model.PRIVATE}, {Key: conf.SSODefaultDir, Value: "/", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE}, {Key: conf.SSODefaultPermission, Value: "0", Type: conf.TypeNumber, Group: model.SSO, Flag: model.PRIVATE}, @@ -180,10 +196,32 @@ func InitialSettings() []model.SettingItem { {Key: conf.LdapDefaultPermission, Value: "0", Type: conf.TypeNumber, Group: model.LDAP, Flag: model.PRIVATE}, {Key: conf.LdapLoginTips, Value: "login with ldap", Type: conf.TypeString, Group: model.LDAP, Flag: model.PUBLIC}, - //s3 settings + // s3 settings {Key: conf.S3AccessKeyId, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE}, {Key: conf.S3SecretAccessKey, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE}, {Key: conf.S3Buckets, Value: "[]", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE}, + + // ftp settings + {Key: conf.FTPPublicHost, Value: "127.0.0.1", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPPasvPortMap, Value: "", Type: conf.TypeText, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPProxyUserAgent, Value: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " + + "Chrome/87.0.4280.88 Safari/537.36", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPMandatoryTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPImplicitTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPTLSPrivateKeyPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE}, + {Key: conf.FTPTLSPublicCertPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE}, + + // traffic settings + {Key: conf.TaskOfflineDownloadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Download.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.TaskOfflineDownloadTransferThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Transfer.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.TaskUploadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Upload.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.TaskCopyThreadsNum, Value: 
strconv.Itoa(conf.Conf.Tasks.Copy.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.TaskDecompressDownloadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Decompress.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.TaskDecompressUploadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.DecompressUpload.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.StreamMaxClientDownloadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.StreamMaxClientUploadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.StreamMaxServerDownloadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, + {Key: conf.StreamMaxServerUploadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE}, } initialSettingItems = append(initialSettingItems, tool.Tools.Items()...) if flags.Dev { diff --git a/internal/bootstrap/data/user.go b/internal/bootstrap/data/user.go index 3b71e498..9c3f8962 100644 --- a/internal/bootstrap/data/user.go +++ b/internal/bootstrap/data/user.go @@ -32,6 +32,8 @@ func initUser() { Role: model.ADMIN, BasePath: "/", Authn: "[]", + // 0(can see hidden) - 7(can remove) & 12(can read archives) - 13(can decompress archives) + Permission: 0x30FF, } if err := op.CreateUser(admin); err != nil { panic(err) @@ -63,39 +65,4 @@ func initUser() { utils.Log.Fatalf("[init user] Failed to get guest user: %v", err) } } - hashPwdForOldVersion() - updateAuthnForOldVersion() -} - -func hashPwdForOldVersion() { - users, _, err := op.GetUsers(1, -1) - if err != nil { - utils.Log.Fatalf("[hash pwd for old version] failed get users: %v", err) - } - for i := range users { - user := users[i] - if user.PwdHash == "" { - user.SetPassword(user.Password) - user.Password = "" - if err := db.UpdateUser(&user); err != nil { - utils.Log.Fatalf("[hash pwd for old version] failed update user: %v", err) - } - } - } -} - -func updateAuthnForOldVersion() { - users, _, err := op.GetUsers(1, -1) - if err != nil { - utils.Log.Fatalf("[update authn for old version] failed get users: %v", err) - } - for i := range users { - user := users[i] - if user.Authn == "" { - user.Authn = "[]" - if err := db.UpdateUser(&user); err != nil { - utils.Log.Fatalf("[update authn for old version] failed update user: %v", err) - } - } - } } diff --git a/internal/bootstrap/db.go b/internal/bootstrap/db.go index 5dfa2820..5f5f6fce 100644 --- a/internal/bootstrap/db.go +++ b/internal/bootstrap/db.go @@ -56,20 +56,25 @@ func InitDB() { } case "mysql": { - //[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] - dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local&tls=%s", - database.User, database.Password, database.Host, database.Port, database.Name, database.SSLMode) - if database.DSN != "" { - dsn = database.DSN + dsn := database.DSN + if dsn == "" { + //[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] + dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local&tls=%s", + database.User, database.Password, database.Host, database.Port, database.Name, database.SSLMode) } dB, err = gorm.Open(mysql.Open(dsn), gormConfig) } case "postgres": { - dsn := fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai", - database.Host, database.User, 
database.Password, database.Name, database.Port, database.SSLMode) - if database.DSN != "" { - dsn = database.DSN + dsn := database.DSN + if dsn == "" { + if database.Password != "" { + dsn = fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai", + database.Host, database.User, database.Password, database.Name, database.Port, database.SSLMode) + } else { + dsn = fmt.Sprintf("host=%s user=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai", + database.Host, database.User, database.Name, database.Port, database.SSLMode) + } } dB, err = gorm.Open(postgres.Open(dsn), gormConfig) } diff --git a/internal/bootstrap/patch.go b/internal/bootstrap/patch.go new file mode 100644 index 00000000..5c7ca758 --- /dev/null +++ b/internal/bootstrap/patch.go @@ -0,0 +1,74 @@ +package bootstrap + +import ( + "fmt" + "strings" + + "github.com/alist-org/alist/v3/internal/bootstrap/patch" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/pkg/utils" +) + +var LastLaunchedVersion = "" + +func safeCall(v string, i int, f func()) { + defer func() { + if r := recover(); r != nil { + utils.Log.Errorf("Recovered from patch (version: %s, index: %d) panic: %v", v, i, r) + } + }() + + f() +} + +func getVersion(v string) (major, minor, patchNum int, err error) { + _, err = fmt.Sscanf(v, "v%d.%d.%d", &major, &minor, &patchNum) + return major, minor, patchNum, err +} + +func compareVersion(majorA, minorA, patchNumA, majorB, minorB, patchNumB int) bool { + if majorA != majorB { + return majorA > majorB + } + if minorA != minorB { + return minorA > minorB + } + if patchNumA != patchNumB { + return patchNumA > patchNumB + } + return true +} + +func InitUpgradePatch() { + if !strings.HasPrefix(conf.Version, "v") { + for _, vp := range patch.UpgradePatches { + for i, p := range vp.Patches { + safeCall(vp.Version, i, p) + } + } + return + } + if LastLaunchedVersion == conf.Version { + return + } + if LastLaunchedVersion == "" { + LastLaunchedVersion = "v0.0.0" + } + major, minor, patchNum, err := getVersion(LastLaunchedVersion) + if err != nil { + utils.Log.Warnf("Failed to parse last launched version %s: %v, skipping all patches and rewriting last launched version", LastLaunchedVersion, err) + return + } + for _, vp := range patch.UpgradePatches { + ma, mi, pn, err := getVersion(vp.Version) + if err != nil { + utils.Log.Errorf("Skip invalid version %s patches: %v", vp.Version, err) + continue + } + if compareVersion(ma, mi, pn, major, minor, patchNum) { + for i, p := range vp.Patches { + safeCall(vp.Version, i, p) + } + } + } +} diff --git a/internal/bootstrap/patch/all.go b/internal/bootstrap/patch/all.go new file mode 100644 index 00000000..b363d129 --- /dev/null +++ b/internal/bootstrap/patch/all.go @@ -0,0 +1,35 @@ +package patch + +import ( + "github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_24_0" + "github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_32_0" + "github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_41_0" +) + +type VersionPatches struct { + // Version means that if the system is upgraded from Version or an earlier one + // to the current version, all patches in Patches will be executed. 
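+ // For example, if the last launched version is v3.30.0, the v3.32.0 and + // v3.41.0 patch sets below run on the next start, while v3.24.0 is skipped. 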
+ Version string + Patches []func() +} + +var UpgradePatches = []VersionPatches{ + { + Version: "v3.24.0", + Patches: []func(){ + v3_24_0.HashPwdForOldVersion, + }, + }, + { + Version: "v3.32.0", + Patches: []func(){ + v3_32_0.UpdateAuthnForOldVersion, + }, + }, + { + Version: "v3.41.0", + Patches: []func(){ + v3_41_0.GrantAdminPermissions, + }, + }, +} diff --git a/internal/bootstrap/patch/v3_24_0/hash_password.go b/internal/bootstrap/patch/v3_24_0/hash_password.go new file mode 100644 index 00000000..2adb640d --- /dev/null +++ b/internal/bootstrap/patch/v3_24_0/hash_password.go @@ -0,0 +1,26 @@ +package v3_24_0 + +import ( + "github.com/alist-org/alist/v3/internal/db" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/utils" +) + +// HashPwdForOldVersion encodes passwords using SHA256 +// First published: 75acbcc perf: sha256 for user's password (close #3552) by Andy Hsu +func HashPwdForOldVersion() { + users, _, err := op.GetUsers(1, -1) + if err != nil { + utils.Log.Fatalf("[hash pwd for old version] failed to get users: %v", err) + } + for i := range users { + user := users[i] + if user.PwdHash == "" { + user.SetPassword(user.Password) + user.Password = "" + if err := db.UpdateUser(&user); err != nil { + utils.Log.Fatalf("[hash pwd for old version] failed to update user: %v", err) + } + } + } +} diff --git a/internal/bootstrap/patch/v3_32_0/update_authn.go b/internal/bootstrap/patch/v3_32_0/update_authn.go new file mode 100644 index 00000000..92a594fd --- /dev/null +++ b/internal/bootstrap/patch/v3_32_0/update_authn.go @@ -0,0 +1,25 @@ +package v3_32_0 + +import ( + "github.com/alist-org/alist/v3/internal/db" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/utils" +) + +// UpdateAuthnForOldVersion updates users' authn +// First published: bdfc159 fix: webauthn logspam (#6181) by itsHenry +func UpdateAuthnForOldVersion() { + users, _, err := op.GetUsers(1, -1) + if err != nil { + utils.Log.Fatalf("[update authn for old version] failed to get users: %v", err) + } + for i := range users { + user := users[i] + if user.Authn == "" { + user.Authn = "[]" + if err := db.UpdateUser(&user); err != nil { + utils.Log.Fatalf("[update authn for old version] failed to update user: %v", err) + } + } + } +} diff --git a/internal/bootstrap/patch/v3_41_0/grant_permission.go b/internal/bootstrap/patch/v3_41_0/grant_permission.go new file mode 100644 index 00000000..60d8ab4f --- /dev/null +++ b/internal/bootstrap/patch/v3_41_0/grant_permission.go @@ -0,0 +1,21 @@ +package v3_41_0 + +import ( + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/utils" +) + +// GrantAdminPermissions gives admin Permission 0(can see hidden) - 9(webdav manage) and +// 12(can read archives) - 13(can decompress archives) +// This patch is written to help users upgrading from an older version better adapt to PR AlistGo/alist#7705 and +// PR AlistGo/alist#7817. 
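+// As an illustration of the mask: 0x33FF == 0x03FF | 0x3000, i.e. permission +// bits 0-9 plus bits 12-13. The check below fires only when the admin holds +// none of these bits, so a partially customized permission set is left as-is. 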
+func GrantAdminPermissions() { + admin, err := op.GetAdmin() + if err == nil && (admin.Permission & 0x33FF) == 0 { + admin.Permission |= 0x33FF + err = op.UpdateUser(admin) + } + if err != nil { + utils.Log.Errorf("Cannot grant permissions to admin: %v", err) + } +} diff --git a/internal/bootstrap/stream_limit.go b/internal/bootstrap/stream_limit.go new file mode 100644 index 00000000..5ece71e4 --- /dev/null +++ b/internal/bootstrap/stream_limit.go @@ -0,0 +1,53 @@ +package bootstrap + +import ( + "context" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/internal/stream" + "golang.org/x/time/rate" +) + +type blockBurstLimiter struct { + *rate.Limiter +} + +func (l blockBurstLimiter) WaitN(ctx context.Context, total int) error { + for total > 0 { + n := l.Burst() + if l.Limiter.Limit() == rate.Inf || n > total { + n = total + } + err := l.Limiter.WaitN(ctx, n) + if err != nil { + return err + } + total -= n + } + return nil +} + +func streamFilterNegative(limit int) (rate.Limit, int) { + if limit < 0 { + return rate.Inf, 0 + } + return rate.Limit(limit) * 1024.0, limit * 1024 +} + +func initLimiter(limiter *stream.Limiter, s string) { + clientDownLimit, burst := streamFilterNegative(setting.GetInt(s, -1)) + *limiter = blockBurstLimiter{Limiter: rate.NewLimiter(clientDownLimit, burst)} + op.RegisterSettingChangingCallback(func() { + newLimit, newBurst := streamFilterNegative(setting.GetInt(s, -1)) + (*limiter).SetLimit(newLimit) + (*limiter).SetBurst(newBurst) + }) +} + +func InitStreamLimit() { + initLimiter(&stream.ClientDownloadLimit, conf.StreamMaxClientDownloadSpeed) + initLimiter(&stream.ClientUploadLimit, conf.StreamMaxClientUploadSpeed) + initLimiter(&stream.ServerDownloadLimit, conf.StreamMaxServerDownloadSpeed) + initLimiter(&stream.ServerUploadLimit, conf.StreamMaxServerUploadSpeed) +} diff --git a/internal/bootstrap/task.go b/internal/bootstrap/task.go index 33902353..c67e3029 100644 --- a/internal/bootstrap/task.go +++ b/internal/bootstrap/task.go @@ -5,15 +5,44 @@ import ( "github.com/alist-org/alist/v3/internal/db" "github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/offline_download/tool" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" "github.com/xhofe/tache" ) +func taskFilterNegative(num int) int64 { + if num < 0 { + num = 0 + } + return int64(num) +} + func InitTaskManager() { - fs.UploadTaskManager = tache.NewManager[*fs.UploadTask](tache.WithWorks(conf.Conf.Tasks.Upload.Workers), tache.WithMaxRetry(conf.Conf.Tasks.Upload.MaxRetry)) //upload will not support persist - fs.CopyTaskManager = tache.NewManager[*fs.CopyTask](tache.WithWorks(conf.Conf.Tasks.Copy.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant), db.UpdateTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Copy.MaxRetry)) - tool.DownloadTaskManager = tache.NewManager[*tool.DownloadTask](tache.WithWorks(conf.Conf.Tasks.Download.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant), db.UpdateTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Download.MaxRetry)) - tool.TransferTaskManager = tache.NewManager[*tool.TransferTask](tache.WithWorks(conf.Conf.Tasks.Transfer.Workers), 
tache.WithPersistFunction(db.GetTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant), db.UpdateTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Transfer.MaxRetry)) + fs.UploadTaskManager = tache.NewManager[*fs.UploadTask](tache.WithWorks(setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers)), tache.WithMaxRetry(conf.Conf.Tasks.Upload.MaxRetry)) //upload will not support persist + op.RegisterSettingChangingCallback(func() { + fs.UploadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers))) + }) + fs.CopyTaskManager = tache.NewManager[*fs.CopyTask](tache.WithWorks(setting.GetInt(conf.TaskCopyThreadsNum, conf.Conf.Tasks.Copy.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant), db.UpdateTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Copy.MaxRetry)) + op.RegisterSettingChangingCallback(func() { + fs.CopyTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskCopyThreadsNum, conf.Conf.Tasks.Copy.Workers))) + }) + tool.DownloadTaskManager = tache.NewManager[*tool.DownloadTask](tache.WithWorks(setting.GetInt(conf.TaskOfflineDownloadThreadsNum, conf.Conf.Tasks.Download.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant), db.UpdateTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Download.MaxRetry)) + op.RegisterSettingChangingCallback(func() { + tool.DownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskOfflineDownloadThreadsNum, conf.Conf.Tasks.Download.Workers))) + }) + tool.TransferTaskManager = tache.NewManager[*tool.TransferTask](tache.WithWorks(setting.GetInt(conf.TaskOfflineDownloadTransferThreadsNum, conf.Conf.Tasks.Transfer.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant), db.UpdateTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Transfer.MaxRetry)) + op.RegisterSettingChangingCallback(func() { + tool.TransferTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskOfflineDownloadTransferThreadsNum, conf.Conf.Tasks.Transfer.Workers))) + }) if len(tool.TransferTaskManager.GetAll()) == 0 { //prevent offline downloaded files from being deleted CleanTempDir() } + fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](tache.WithWorks(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant), db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry)) + op.RegisterSettingChangingCallback(func() { + fs.ArchiveDownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers))) + }) + fs.ArchiveContentUploadTaskManager.Manager = tache.NewManager[*fs.ArchiveContentUploadTask](tache.WithWorks(setting.GetInt(conf.TaskDecompressUploadThreadsNum, conf.Conf.Tasks.DecompressUpload.Workers)), tache.WithMaxRetry(conf.Conf.Tasks.DecompressUpload.MaxRetry)) //decompress upload will not support persist + op.RegisterSettingChangingCallback(func() { + 
fs.ArchiveContentUploadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressUploadThreadsNum, conf.Conf.Tasks.DecompressUpload.Workers))) + }) } diff --git a/internal/conf/config.go b/internal/conf/config.go index c5dc9c52..cdb86fee 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -35,6 +35,7 @@ type Scheme struct { KeyFile string `json:"key_file" env:"KEY_FILE"` UnixFile string `json:"unix_file" env:"UNIX_FILE"` UnixFilePerm string `json:"unix_file_perm" env:"UNIX_FILE_PERM"` + EnableH2c bool `json:"enable_h2c" env:"ENABLE_H2C"` } type LogConfig struct { @@ -53,10 +54,13 @@ type TaskConfig struct { } type TasksConfig struct { - Download TaskConfig `json:"download" envPrefix:"DOWNLOAD_"` - Transfer TaskConfig `json:"transfer" envPrefix:"TRANSFER_"` - Upload TaskConfig `json:"upload" envPrefix:"UPLOAD_"` - Copy TaskConfig `json:"copy" envPrefix:"COPY_"` + Download TaskConfig `json:"download" envPrefix:"DOWNLOAD_"` + Transfer TaskConfig `json:"transfer" envPrefix:"TRANSFER_"` + Upload TaskConfig `json:"upload" envPrefix:"UPLOAD_"` + Copy TaskConfig `json:"copy" envPrefix:"COPY_"` + Decompress TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"` + DecompressUpload TaskConfig `json:"decompress_upload" envPrefix:"DECOMPRESS_UPLOAD_"` + AllowRetryCanceled bool `json:"allow_retry_canceled" env:"ALLOW_RETRY_CANCELED"` } type Cors struct { @@ -71,6 +75,24 @@ type S3 struct { SSL bool `json:"ssl" env:"SSL"` } +type FTP struct { + Enable bool `json:"enable" env:"ENABLE"` + Listen string `json:"listen" env:"LISTEN"` + FindPasvPortAttempts int `json:"find_pasv_port_attempts" env:"FIND_PASV_PORT_ATTEMPTS"` + ActiveTransferPortNon20 bool `json:"active_transfer_port_non_20" env:"ACTIVE_TRANSFER_PORT_NON_20"` + IdleTimeout int `json:"idle_timeout" env:"IDLE_TIMEOUT"` + ConnectionTimeout int `json:"connection_timeout" env:"CONNECTION_TIMEOUT"` + DisableActiveMode bool `json:"disable_active_mode" env:"DISABLE_ACTIVE_MODE"` + DefaultTransferBinary bool `json:"default_transfer_binary" env:"DEFAULT_TRANSFER_BINARY"` + EnableActiveConnIPCheck bool `json:"enable_active_conn_ip_check" env:"ENABLE_ACTIVE_CONN_IP_CHECK"` + EnablePasvConnIPCheck bool `json:"enable_pasv_conn_ip_check" env:"ENABLE_PASV_CONN_IP_CHECK"` +} + +type SFTP struct { + Enable bool `json:"enable" env:"ENABLE"` + Listen string `json:"listen" env:"LISTEN"` +} + type Config struct { Force bool `json:"force" env:"FORCE"` SiteURL string `json:"site_url" env:"SITE_URL"` @@ -86,10 +108,14 @@ type Config struct { Log LogConfig `json:"log"` DelayedStart int `json:"delayed_start" env:"DELAYED_START"` MaxConnections int `json:"max_connections" env:"MAX_CONNECTIONS"` + MaxConcurrency int `json:"max_concurrency" env:"MAX_CONCURRENCY"` TlsInsecureSkipVerify bool `json:"tls_insecure_skip_verify" env:"TLS_INSECURE_SKIP_VERIFY"` Tasks TasksConfig `json:"tasks" envPrefix:"TASKS_"` Cors Cors `json:"cors" envPrefix:"CORS_"` S3 S3 `json:"s3" envPrefix:"S3_"` + FTP FTP `json:"ftp" envPrefix:"FTP_"` + SFTP SFTP `json:"sftp" envPrefix:"SFTP_"` + LastLaunchedVersion string `json:"last_launched_version"` } func DefaultConfig() *Config { @@ -128,26 +154,37 @@ func DefaultConfig() *Config { MaxAge: 28, }, MaxConnections: 0, + MaxConcurrency: 64, TlsInsecureSkipVerify: true, Tasks: TasksConfig{ Download: TaskConfig{ - Workers: 5, - MaxRetry: 1, - TaskPersistant: true, + Workers: 5, + MaxRetry: 1, + // TaskPersistant: true, }, Transfer: TaskConfig{ - Workers: 5, - MaxRetry: 2, - TaskPersistant: true, + Workers: 5, + 
MaxRetry: 2, + // TaskPersistant: true, }, Upload: TaskConfig{ Workers: 5, }, Copy: TaskConfig{ - Workers: 5, - MaxRetry: 2, - TaskPersistant: true, + Workers: 5, + MaxRetry: 2, + // TaskPersistant: true, }, + Decompress: TaskConfig{ + Workers: 5, + MaxRetry: 2, + // TaskPersistant: true, + }, + DecompressUpload: TaskConfig{ + Workers: 5, + MaxRetry: 2, + }, + AllowRetryCanceled: false, }, Cors: Cors{ AllowOrigins: []string{"*"}, @@ -159,5 +196,22 @@ func DefaultConfig() *Config { Port: 5246, SSL: false, }, + FTP: FTP{ + Enable: false, + Listen: ":5221", + FindPasvPortAttempts: 50, + ActiveTransferPortNon20: false, + IdleTimeout: 900, + ConnectionTimeout: 30, + DisableActiveMode: false, + DefaultTransferBinary: false, + EnableActiveConnIPCheck: true, + EnablePasvConnIPCheck: true, + }, + SFTP: SFTP{ + Enable: false, + Listen: ":5222", + }, + LastLaunchedVersion: "", } } diff --git a/internal/conf/const.go b/internal/conf/const.go index 2d53702e..5cb8d850 100644 --- a/internal/conf/const.go +++ b/internal/conf/const.go @@ -22,15 +22,17 @@ const ( MainColor = "main_color" // preview - TextTypes = "text_types" - AudioTypes = "audio_types" - VideoTypes = "video_types" - ImageTypes = "image_types" - ProxyTypes = "proxy_types" - ProxyIgnoreHeaders = "proxy_ignore_headers" - AudioAutoplay = "audio_autoplay" - VideoAutoplay = "video_autoplay" - + TextTypes = "text_types" + AudioTypes = "audio_types" + VideoTypes = "video_types" + ImageTypes = "image_types" + ProxyTypes = "proxy_types" + ProxyIgnoreHeaders = "proxy_ignore_headers" + AudioAutoplay = "audio_autoplay" + VideoAutoplay = "video_autoplay" + PreviewArchivesByDefault = "preview_archives_by_default" + ReadMeAutoRender = "readme_autorender" + FilterReadMeScripts = "filter_readme_scripts" // global HideFiles = "hide_files" CustomizeHead = "customize_head" @@ -54,11 +56,24 @@ const ( Aria2Uri = "aria2_uri" Aria2Secret = "aria2_secret" + // transmission + TransmissionUri = "transmission_uri" + TransmissionSeedtime = "transmission_seedtime" + + // 115 + Pan115TempDir = "115_temp_dir" + + // pikpak + PikPakTempDir = "pikpak_temp_dir" + + // thunder + ThunderTempDir = "thunder_temp_dir" + // single Token = "token" IndexProgress = "index_progress" - //SSO + // SSO SSOClientId = "sso_client_id" SSOClientSecret = "sso_client_secret" SSOLoginEnabled = "sso_login_enabled" @@ -68,12 +83,13 @@ const ( SSOApplicationName = "sso_application_name" SSOEndpointName = "sso_endpoint_name" SSOJwtPublicKey = "sso_jwt_public_key" + SSOExtraScopes = "sso_extra_scopes" SSOAutoRegister = "sso_auto_register" SSODefaultDir = "sso_default_dir" SSODefaultPermission = "sso_default_permission" SSOCompatibilityMode = "sso_compatibility_mode" - //ldap + // ldap LdapLoginEnabled = "ldap_login_enabled" LdapServer = "ldap_server" LdapManagerDN = "ldap_manager_dn" @@ -84,7 +100,7 @@ const ( LdapDefaultDir = "ldap_default_dir" LdapLoginTips = "ldap_login_tips" - //s3 + // s3 S3Buckets = "s3_buckets" S3AccessKeyId = "s3_access_key_id" S3SecretAccessKey = "s3_secret_access_key" @@ -92,12 +108,33 @@ const ( // qbittorrent QbittorrentUrl = "qbittorrent_url" QbittorrentSeedtime = "qbittorrent_seedtime" + + // ftp + FTPPublicHost = "ftp_public_host" + FTPPasvPortMap = "ftp_pasv_port_map" + FTPProxyUserAgent = "ftp_proxy_user_agent" + FTPMandatoryTLS = "ftp_mandatory_tls" + FTPImplicitTLS = "ftp_implicit_tls" + FTPTLSPrivateKeyPath = "ftp_tls_private_key_path" + FTPTLSPublicCertPath = "ftp_tls_public_cert_path" + + // traffic + TaskOfflineDownloadThreadsNum = 
"offline_download_task_threads_num" + TaskOfflineDownloadTransferThreadsNum = "offline_download_transfer_task_threads_num" + TaskUploadThreadsNum = "upload_task_threads_num" + TaskCopyThreadsNum = "copy_task_threads_num" + TaskDecompressDownloadThreadsNum = "decompress_download_task_threads_num" + TaskDecompressUploadThreadsNum = "decompress_upload_task_threads_num" + StreamMaxClientDownloadSpeed = "max_client_download_speed" + StreamMaxClientUploadSpeed = "max_client_upload_speed" + StreamMaxServerDownloadSpeed = "max_server_download_speed" + StreamMaxServerUploadSpeed = "max_server_upload_speed" ) const ( UNKNOWN = iota FOLDER - //OFFICE + // OFFICE VIDEO AUDIO TEXT diff --git a/internal/conf/var.go b/internal/conf/var.go index 0a8eb16f..7ae1a5ab 100644 --- a/internal/conf/var.go +++ b/internal/conf/var.go @@ -7,7 +7,6 @@ import ( var ( BuiltAt string - GoVersion string GitAuthor string GitCommit string Version string = "dev" diff --git a/internal/db/db.go b/internal/db/db.go index 2df58d37..2cd18050 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -12,7 +12,7 @@ var db *gorm.DB func Init(d *gorm.DB) { db = d - err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem)) + err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey)) if err != nil { log.Fatalf("failed migrate database: %s", err.Error()) } diff --git a/internal/db/sshkey.go b/internal/db/sshkey.go new file mode 100644 index 00000000..f51dbfdc --- /dev/null +++ b/internal/db/sshkey.go @@ -0,0 +1,57 @@ +package db + +import ( + "github.com/alist-org/alist/v3/internal/model" + "github.com/pkg/errors" +) + +func GetSSHPublicKeyByUserId(userId uint, pageIndex, pageSize int) (keys []model.SSHPublicKey, count int64, err error) { + keyDB := db.Model(&model.SSHPublicKey{}) + query := model.SSHPublicKey{UserId: userId} + if err := keyDB.Where(query).Count(&count).Error; err != nil { + return nil, 0, errors.Wrapf(err, "failed get user's keys count") + } + if err := keyDB.Where(query).Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&keys).Error; err != nil { + return nil, 0, errors.Wrapf(err, "failed get find user's keys") + } + return keys, count, nil +} + +func GetSSHPublicKeyById(id uint) (*model.SSHPublicKey, error) { + var k model.SSHPublicKey + if err := db.First(&k, id).Error; err != nil { + return nil, errors.Wrapf(err, "failed get old key") + } + return &k, nil +} + +func GetSSHPublicKeyByUserTitle(userId uint, title string) (*model.SSHPublicKey, error) { + key := model.SSHPublicKey{UserId: userId, Title: title} + if err := db.Where(key).First(&key).Error; err != nil { + return nil, errors.Wrapf(err, "failed find key with title of user") + } + return &key, nil +} + +func CreateSSHPublicKey(k *model.SSHPublicKey) error { + return errors.WithStack(db.Create(k).Error) +} + +func UpdateSSHPublicKey(k *model.SSHPublicKey) error { + return errors.WithStack(db.Save(k).Error) +} + +func GetSSHPublicKeys(pageIndex, pageSize int) (keys []model.SSHPublicKey, count int64, err error) { + keyDB := db.Model(&model.SSHPublicKey{}) + if err := keyDB.Count(&count).Error; err != nil { + return nil, 0, errors.Wrapf(err, "failed get keys count") + } + if err := keyDB.Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&keys).Error; err != nil { + return nil, 0, errors.Wrapf(err, "failed get find 
keys") + } + return keys, count, nil +} + +func DeleteSSHPublicKeyById(id uint) error { + return errors.WithStack(db.Delete(&model.SSHPublicKey{}, id).Error) +} diff --git a/internal/driver/driver.go b/internal/driver/driver.go index 781e8532..9e9440b6 100644 --- a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -77,7 +77,37 @@ type Remove interface { } type Put interface { - Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) error + // Put a file (provided as a FileStreamer) into the driver + // Besides the most basic upload functionality, the following features also need to be implemented: + // 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods: + // (1) Use request methods that carry context, such as the following: + // a. http.NewRequestWithContext + // b. resty.Request.SetContext + // c. s3manager.Uploader.UploadWithContext + // d. utils.CopyWithCtx + // (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream` + // (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process, + // this is typically applicable to chunked uploads. + // 2. Submit upload progress (via `up`) in real-time. There are three recommended ways as follows: + // (1) Use `utils.CopyWithCtx` + // (2) Use `driver.ReaderUpdatingProgress` + // (3) Use `driver.Progress` with `io.TeeReader` + // 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream + // in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and + // before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN` + // if your file chunks are sufficiently small (less than about 50KB). + // NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if + // you use a `errgroup.Group` to upload each chunk in parallel, you should consider using a recursive + // mutex like `semaphore.Weighted` to limit the maximum number of upload threads, preventing excessive + // memory usage caused by buffering too many file chunks awaiting upload. + Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) error +} + +type PutURL interface { + // PutURL directly put a URL into the storage + // Applicable to index-based drivers like URL-Tree or drivers that support uploading files as URLs + // Called when using SimpleHttp for offline downloading, skipping creating a download task + PutURL(ctx context.Context, dstDir model.Obj, name, url string) error } //type WriteResult interface { @@ -106,27 +136,75 @@ type CopyResult interface { } type PutResult interface { - Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) (model.Obj, error) + // Put a file (provided as a FileStreamer) into the driver and return the put obj + // Besides the most basic upload functionality, the following features also need to be implemented: + // 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods: + // (1) Use request methods that carry context, such as the following: + // a. http.NewRequestWithContext + // b. resty.Request.SetContext + // c. s3manager.Uploader.UploadWithContext + // d. 
utils.CopyWithCtx + // (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream` + // (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process, + // this is typically applicable to chunked uploads. + // 2. Submit upload progress (via `up`) in real-time. There are three recommended ways as follows: + // (1) Use `utils.CopyWithCtx` + // (2) Use `driver.ReaderUpdatingProgress` + // (3) Use `driver.Progress` with `io.TeeReader` + // 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream + // in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and + // before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN` + // if your file chunks are sufficiently small (less than about 50KB). + // NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if + // you use an `errgroup.Group` to upload each chunk in parallel, you should consider using a weighted + // semaphore like `semaphore.Weighted` to limit the maximum number of upload threads, preventing excessive + // memory usage caused by buffering too many file chunks awaiting upload. + Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) (model.Obj, error) } -type UpdateProgress func(percentage float64) - -type Progress struct { - Total int64 - Done int64 - up UpdateProgress +type PutURLResult interface { + // PutURL directly puts a URL into the storage + // Applicable to index-based drivers like URL-Tree or drivers that support uploading files as URLs + // Called when using SimpleHttp for offline downloading, skipping the creation of a download task + PutURL(ctx context.Context, dstDir model.Obj, name, url string) (model.Obj, error) } -func (p *Progress) Write(b []byte) (n int, err error) { - n = len(b) - p.Done += int64(n) - p.up(float64(p.Done) / float64(p.Total) * 100) - return +type ArchiveReader interface { + // GetArchiveMeta gets the meta-info of an archive + // return errs.WrongArchivePassword if the meta-info is also encrypted but the provided password is wrong or empty + // return errs.NotImplement to use internal archive tools to get the meta-info, such as in the following cases: + // 1. the driver does not support the format of the archive but an internal tool may + // 2. 
handling archives is a VIP feature, but the driver does not have VIP access + GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) + // ListArchive lists the children of model.ArchiveInnerArgs.InnerPath in the archive + // return errs.NotImplement to use internal archive tools to list the children + // return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree + ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) + // Extract gets the url/filepath/reader of a file in the archive + // return errs.NotImplement to use internal archive tools to extract + Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) } -func NewProgress(total int64, up UpdateProgress) *Progress { - return &Progress{ - Total: total, - up: up, - } +type ArchiveGetter interface { + // ArchiveGet gets a file by inner path + // return errs.NotImplement to use internal archive tools to get the children + // return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree + ArchiveGet(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (model.Obj, error) +} + +type ArchiveDecompress interface { + ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error +} + +type ArchiveDecompressResult interface { + // ArchiveDecompress decompresses an archive + // when args.PutIntoNewDir, the new sub-folder should be named the same as the archive but without the extension + // return each decompressed obj from the root path of the archive when args.PutIntoNewDir is false + // return only the newly created folder when args.PutIntoNewDir is true + // return errs.NotImplement to use internal archive tools to decompress + ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) +} + +type Reference interface { + InitReference(storage Driver) error } diff --git a/internal/driver/utils.go b/internal/driver/utils.go new file mode 100644 index 00000000..2af850ec --- /dev/null +++ b/internal/driver/utils.go @@ -0,0 +1,62 @@ +package driver + +import ( + "context" + "io" + + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" +) + +type UpdateProgress = model.UpdateProgress + +type Progress struct { + Total int64 + Done int64 + up UpdateProgress +} + +func (p *Progress) Write(b []byte) (n int, err error) { + n = len(b) + p.Done += int64(n) + p.up(float64(p.Done) / float64(p.Total) * 100) + return +} + +func NewProgress(total int64, up UpdateProgress) *Progress { + return &Progress{ + Total: total, + up: up, + } +} + +type RateLimitReader = stream.RateLimitReader + +type RateLimitWriter = stream.RateLimitWriter + +type RateLimitFile = stream.RateLimitFile + +func NewLimitedUploadStream(ctx context.Context, r io.Reader) *RateLimitReader { + return &RateLimitReader{ + Reader: r, + Limiter: stream.ServerUploadLimit, + Ctx: ctx, + } +} + +func NewLimitedUploadFile(ctx context.Context, f model.File) *RateLimitFile { + return &RateLimitFile{ + File: f, + Limiter: stream.ServerUploadLimit, + Ctx: ctx, + } +} + +func ServerUploadLimitWaitN(ctx context.Context, n int) error { + return stream.ServerUploadLimit.WaitN(ctx, n) +} + +type ReaderWithCtx = stream.ReaderWithCtx + +type ReaderUpdatingProgress = stream.ReaderUpdatingProgress + +type SimpleReaderWithSize = stream.SimpleReaderWithSize diff --git 
a/internal/errs/errors.go b/internal/errs/errors.go index ecfe43e3..2a22dca1 100644 --- a/internal/errs/errors.go +++ b/internal/errs/errors.go @@ -19,6 +19,10 @@ var ( StorageNotFound = errors.New("storage not found") StreamIncomplete = errors.New("upload/download stream incomplete, possible network issue") StreamPeekFail = errors.New("StreamPeekFail") + + UnknownArchiveFormat = errors.New("unknown archive format") + WrongArchivePassword = errors.New("wrong archive password") + DriverExtractNotSupported = errors.New("driver extraction not supported") ) // NewErr wrap constant error with an extra message diff --git a/internal/fs/archive.go b/internal/fs/archive.go new file mode 100644 index 00000000..dbae9b33 --- /dev/null +++ b/internal/fs/archive.go @@ -0,0 +1,400 @@ +package fs + +import ( + "context" + stderrors "errors" + "fmt" + "io" + "math/rand" + "mime" + "net/http" + "os" + stdpath "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/internal/task" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/xhofe/tache" +) + +type ArchiveDownloadTask struct { + task.TaskExtension + model.ArchiveDecompressArgs + status string + SrcObjPath string + DstDirPath string + srcStorage driver.Driver + dstStorage driver.Driver + SrcStorageMp string + DstStorageMp string +} + +func (t *ArchiveDownloadTask) GetName() string { + return fmt.Sprintf("decompress [%s](%s)[%s] to [%s](%s) with password <%s>", t.SrcStorageMp, t.SrcObjPath, + t.InnerPath, t.DstStorageMp, t.DstDirPath, t.Password) +} + +func (t *ArchiveDownloadTask) GetStatus() string { + return t.status +} + +func (t *ArchiveDownloadTask) Run() error { + t.ReinitCtx() + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() + uploadTask, err := t.RunWithoutPushUploadTask() + if err != nil { + return err + } + ArchiveContentUploadTaskManager.Add(uploadTask) + return nil +} + +func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadTask, error) { + var err error + if t.srcStorage == nil { + t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp) + } + srcObj, tool, ss, err := op.GetArchiveToolAndStream(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{ + Header: http.Header{}, + }) + if err != nil { + return nil, err + } + defer func() { + var e error + for _, s := range ss { + e = stderrors.Join(e, s.Close()) + } + if e != nil { + log.Errorf("failed to close file streamer, %v", e) + } + }() + var decompressUp model.UpdateProgress + if t.CacheFull { + var total, cur int64 = 0, 0 + for _, s := range ss { + total += s.GetSize() + } + t.SetTotalBytes(total) + t.status = "getting src object" + for _, s := range ss { + if s.GetFile() == nil { + _, err = stream.CacheFullInTempFileAndUpdateProgress(s, func(p float64) { + t.SetProgress((float64(cur) + float64(s.GetSize())*p/100.0) / float64(total)) + }) + } + cur += s.GetSize() + if err != nil { + return nil, err + } + } + t.SetProgress(100.0) + decompressUp = func(_ float64) {} + } else { + decompressUp = t.SetProgress + } + t.status = "walking and decompressing" + dir, err := os.MkdirTemp(conf.Conf.TempDir, "dir-*") + if err != nil { + return nil, err + } + err = tool.Decompress(ss, 
dir, t.ArchiveInnerArgs, decompressUp) + if err != nil { + return nil, err + } + baseName := strings.TrimSuffix(srcObj.GetName(), stdpath.Ext(srcObj.GetName())) + uploadTask := &ArchiveContentUploadTask{ + TaskExtension: task.TaskExtension{ + Creator: t.GetCreator(), + }, + ObjName: baseName, + InPlace: !t.PutIntoNewDir, + FilePath: dir, + DstDirPath: t.DstDirPath, + dstStorage: t.dstStorage, + DstStorageMp: t.DstStorageMp, + } + return uploadTask, nil +} + +var ArchiveDownloadTaskManager *tache.Manager[*ArchiveDownloadTask] + +type ArchiveContentUploadTask struct { + task.TaskExtension + status string + ObjName string + InPlace bool + FilePath string + DstDirPath string + dstStorage driver.Driver + DstStorageMp string + finalized bool +} + +func (t *ArchiveContentUploadTask) GetName() string { + return fmt.Sprintf("upload %s to [%s](%s)", t.ObjName, t.DstStorageMp, t.DstDirPath) +} + +func (t *ArchiveContentUploadTask) GetStatus() string { + return t.status +} + +func (t *ArchiveContentUploadTask) Run() error { + t.ReinitCtx() + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() + return t.RunWithNextTaskCallback(func(nextTsk *ArchiveContentUploadTask) error { + ArchiveContentUploadTaskManager.Add(nextTsk) + return nil + }) +} + +func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTsk *ArchiveContentUploadTask) error) error { + var err error + if t.dstStorage == nil { + t.dstStorage, err = op.GetStorageByMountPath(t.DstStorageMp) + } + info, err := os.Stat(t.FilePath) + if err != nil { + return err + } + if info.IsDir() { + t.status = "src object is dir, listing objs" + nextDstPath := t.DstDirPath + if !t.InPlace { + nextDstPath = stdpath.Join(nextDstPath, t.ObjName) + err = op.MakeDir(t.Ctx(), t.dstStorage, nextDstPath) + if err != nil { + return err + } + } + entries, err := os.ReadDir(t.FilePath) + if err != nil { + return err + } + var es error + for _, entry := range entries { + var nextFilePath string + if entry.IsDir() { + nextFilePath, err = moveToTempPath(stdpath.Join(t.FilePath, entry.Name()), "dir-") + } else { + nextFilePath, err = moveToTempPath(stdpath.Join(t.FilePath, entry.Name()), "file-") + } + if err != nil { + es = stderrors.Join(es, err) + continue + } + err = f(&ArchiveContentUploadTask{ + TaskExtension: task.TaskExtension{ + Creator: t.GetCreator(), + }, + ObjName: entry.Name(), + InPlace: false, + FilePath: nextFilePath, + DstDirPath: nextDstPath, + dstStorage: t.dstStorage, + DstStorageMp: t.DstStorageMp, + }) + if err != nil { + es = stderrors.Join(es, err) + } + } + if es != nil { + return es + } + } else { + t.SetTotalBytes(info.Size()) + file, err := os.Open(t.FilePath) + if err != nil { + return err + } + fs := &stream.FileStream{ + Obj: &model.Object{ + Name: t.ObjName, + Size: info.Size(), + Modified: time.Now(), + }, + Mimetype: mime.TypeByExtension(filepath.Ext(t.ObjName)), + WebPutAsTask: true, + Reader: file, + } + fs.Closers.Add(file) + t.status = "uploading" + err = op.Put(t.Ctx(), t.dstStorage, t.DstDirPath, fs, t.SetProgress, true) + if err != nil { + return err + } + } + t.deleteSrcFile() + return nil +} + +func (t *ArchiveContentUploadTask) Cancel() { + t.TaskExtension.Cancel() + if !conf.Conf.Tasks.AllowRetryCanceled { + t.deleteSrcFile() + } +} + +func (t *ArchiveContentUploadTask) deleteSrcFile() { + if !t.finalized { + _ = os.RemoveAll(t.FilePath) + t.finalized = true + } +} + +func moveToTempPath(path, prefix string) (string, error) { + newPath, err := genTempFileName(prefix) + if err 
!= nil { + return "", err + } + err = os.Rename(path, newPath) + if err != nil { + return "", err + } + return newPath, nil +} + +func genTempFileName(prefix string) (string, error) { + retry := 0 + for retry < 10000 { + newPath := stdpath.Join(conf.Conf.TempDir, prefix+strconv.FormatUint(uint64(rand.Uint32()), 10)) + if _, err := os.Stat(newPath); err != nil { + if os.IsNotExist(err) { + return newPath, nil + } else { + return "", err + } + } + retry++ + } + return "", errors.New("failed to generate temp-file name: too many retries") +} + +type archiveContentUploadTaskManagerType struct { + *tache.Manager[*ArchiveContentUploadTask] +} + +func (m *archiveContentUploadTaskManagerType) Remove(id string) { + if t, ok := m.GetByID(id); ok { + t.deleteSrcFile() + m.Manager.Remove(id) + } +} + +func (m *archiveContentUploadTaskManagerType) RemoveAll() { + tasks := m.GetAll() + for _, t := range tasks { + m.Remove(t.GetID()) + } +} + +func (m *archiveContentUploadTaskManagerType) RemoveByState(state ...tache.State) { + tasks := m.GetByState(state...) + for _, t := range tasks { + m.Remove(t.GetID()) + } +} + +func (m *archiveContentUploadTaskManagerType) RemoveByCondition(condition func(task *ArchiveContentUploadTask) bool) { + tasks := m.GetByCondition(condition) + for _, t := range tasks { + m.Remove(t.GetID()) + } +} + +var ArchiveContentUploadTaskManager = &archiveContentUploadTaskManagerType{ + Manager: nil, +} + +func archiveMeta(ctx context.Context, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) { + storage, actualPath, err := op.GetStorageAndActualPath(path) + if err != nil { + return nil, errors.WithMessage(err, "failed get storage") + } + return op.GetArchiveMeta(ctx, storage, actualPath, args) +} + +func archiveList(ctx context.Context, path string, args model.ArchiveListArgs) ([]model.Obj, error) { + storage, actualPath, err := op.GetStorageAndActualPath(path) + if err != nil { + return nil, errors.WithMessage(err, "failed get storage") + } + return op.ListArchive(ctx, storage, actualPath, args) +} + +func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) (task.TaskExtensionInfo, error) { + srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(srcObjPath) + if err != nil { + return nil, errors.WithMessage(err, "failed get src storage") + } + dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath) + if err != nil { + return nil, errors.WithMessage(err, "failed get dst storage") + } + if srcStorage.GetStorage() == dstStorage.GetStorage() { + err = op.ArchiveDecompress(ctx, srcStorage, srcObjActualPath, dstDirActualPath, args, lazyCache...) 
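+ // If the driver natively implements decompression, the call above does the work + // server-side: on success err is nil and the early return below yields (nil, nil), + // i.e. no task handle, since nothing was queued. Only errs.NotImplement falls + // through to the generic download-extract-upload pipeline.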
+ if !errors.Is(err, errs.NotImplement) { + return nil, err + } + } + taskCreator, _ := ctx.Value("user").(*model.User) + tsk := &ArchiveDownloadTask{ + TaskExtension: task.TaskExtension{ + Creator: taskCreator, + }, + ArchiveDecompressArgs: args, + srcStorage: srcStorage, + dstStorage: dstStorage, + SrcObjPath: srcObjActualPath, + DstDirPath: dstDirActualPath, + SrcStorageMp: srcStorage.GetStorage().MountPath, + DstStorageMp: dstStorage.GetStorage().MountPath, + } + if ctx.Value(conf.NoTaskKey) != nil { + uploadTask, err := tsk.RunWithoutPushUploadTask() + if err != nil { + return nil, errors.WithMessagef(err, "failed download [%s]", srcObjPath) + } + defer uploadTask.deleteSrcFile() + var callback func(t *ArchiveContentUploadTask) error + callback = func(t *ArchiveContentUploadTask) error { + e := t.RunWithNextTaskCallback(callback) + t.deleteSrcFile() + return e + } + return nil, uploadTask.RunWithNextTaskCallback(callback) + } else { + ArchiveDownloadTaskManager.Add(tsk) + return tsk, nil + } +} + +func archiveDriverExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) { + storage, actualPath, err := op.GetStorageAndActualPath(path) + if err != nil { + return nil, nil, errors.WithMessage(err, "failed get storage") + } + return op.DriverExtract(ctx, storage, actualPath, args) +} + +func archiveInternalExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + storage, actualPath, err := op.GetStorageAndActualPath(path) + if err != nil { + return nil, 0, errors.WithMessage(err, "failed get storage") + } + return op.InternalExtract(ctx, storage, actualPath, args) +} diff --git a/internal/fs/copy.go b/internal/fs/copy.go index 38407c9a..155e3cf7 100644 --- a/internal/fs/copy.go +++ b/internal/fs/copy.go @@ -3,21 +3,24 @@ package fs import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/errs" "net/http" stdpath "path" + "time" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/internal/task" "github.com/alist-org/alist/v3/pkg/utils" "github.com/pkg/errors" "github.com/xhofe/tache" ) type CopyTask struct { - tache.Base + task.TaskExtension Status string `json:"-"` //don't save status to save space SrcObjPath string `json:"src_path"` DstDirPath string `json:"dst_path"` @@ -36,6 +39,10 @@ func (t *CopyTask) GetStatus() string { } func (t *CopyTask) Run() error { + t.ReinitCtx() + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() var err error if t.srcStorage == nil { t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp) @@ -53,7 +60,7 @@ var CopyTaskManager *tache.Manager[*CopyTask] // Copy if in the same storage, call move method // if not, add copy task -func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (tache.TaskWithInfo, error) { +func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskExtensionInfo, error) { srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(srcObjPath) if err != nil { return nil, errors.WithMessage(err, "failed get src storage") @@ -64,7 +71,10 @@ func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool } // copy if in the same storage, just call driver.Copy if srcStorage.GetStorage() == 
dstStorage.GetStorage() { - return nil, op.Copy(ctx, srcStorage, srcObjActualPath, dstDirActualPath, lazyCache...) + err = op.Copy(ctx, srcStorage, srcObjActualPath, dstDirActualPath, lazyCache...) + if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.NotSupport) { + return nil, err + } } if ctx.Value(conf.NoTaskKey) != nil { srcObj, err := op.Get(ctx, srcStorage, srcObjActualPath) @@ -92,7 +102,11 @@ func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool } } // not in the same storage + taskCreator, _ := ctx.Value("user").(*model.User) t := &CopyTask{ + TaskExtension: task.TaskExtension{ + Creator: taskCreator, + }, srcStorage: srcStorage, dstStorage: dstStorage, SrcObjPath: srcObjActualPath, @@ -123,6 +137,9 @@ func copyBetween2Storages(t *CopyTask, srcStorage, dstStorage driver.Driver, src srcObjPath := stdpath.Join(srcObjPath, obj.GetName()) dstObjPath := stdpath.Join(dstDirPath, srcObj.GetName()) CopyTaskManager.Add(&CopyTask{ + TaskExtension: task.TaskExtension{ + Creator: t.GetCreator(), + }, srcStorage: srcStorage, dstStorage: dstStorage, SrcObjPath: srcObjPath, @@ -142,6 +159,7 @@ func copyFileBetween2Storages(tsk *CopyTask, srcStorage, dstStorage driver.Drive if err != nil { return errors.WithMessagef(err, "failed get src [%s] file", srcFilePath) } + tsk.SetTotalBytes(srcFile.GetSize()) link, _, err := op.Link(tsk.Ctx(), srcStorage, srcFilePath, model.LinkArgs{ Header: http.Header{}, }) diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 23e8a87a..01818e5f 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -2,11 +2,15 @@ package fs import ( "context" + log "github.com/sirupsen/logrus" + "io" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" - log "github.com/sirupsen/logrus" - "github.com/xhofe/tache" + "github.com/alist-org/alist/v3/internal/task" + "github.com/pkg/errors" ) // the param named path of functions in this package is a mount path @@ -69,7 +73,7 @@ func Move(ctx context.Context, srcPath, dstDirPath string, lazyCache ...bool) er return err } -func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (tache.TaskWithInfo, error) { +func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskExtensionInfo, error) { res, err := _copy(ctx, srcObjPath, dstDirPath, lazyCache...) 
if err != nil { log.Errorf("failed copy %s to %s: %+v", srcObjPath, dstDirPath, err) @@ -101,14 +105,54 @@ func PutDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer return err } -func PutAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, error) { - t, err := putAsTask(dstDirPath, file) +func PutAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskExtensionInfo, error) { + t, err := putAsTask(ctx, dstDirPath, file) if err != nil { log.Errorf("failed put %s: %+v", dstDirPath, err) } return t, err } +func ArchiveMeta(ctx context.Context, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) { + meta, err := archiveMeta(ctx, path, args) + if err != nil { + log.Errorf("failed get archive meta %s: %+v", path, err) + } + return meta, err +} + +func ArchiveList(ctx context.Context, path string, args model.ArchiveListArgs) ([]model.Obj, error) { + objs, err := archiveList(ctx, path, args) + if err != nil { + log.Errorf("failed list archive [%s]%s: %+v", path, args.InnerPath, err) + } + return objs, err +} + +func ArchiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) (task.TaskExtensionInfo, error) { + t, err := archiveDecompress(ctx, srcObjPath, dstDirPath, args, lazyCache...) + if err != nil { + log.Errorf("failed decompress [%s]%s: %+v", srcObjPath, args.InnerPath, err) + } + return t, err +} + +func ArchiveDriverExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) { + l, obj, err := archiveDriverExtract(ctx, path, args) + if err != nil { + log.Errorf("failed extract [%s]%s: %+v", path, args.InnerPath, err) + } + return l, obj, err +} + +func ArchiveInternalExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + l, obj, err := archiveInternalExtract(ctx, path, args) + if err != nil { + log.Errorf("failed extract [%s]%s: %+v", path, args.InnerPath, err) + } + return l, obj, err +} + type GetStoragesArgs struct { } @@ -127,3 +171,19 @@ func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) { } return res, err } + +func PutURL(ctx context.Context, path, dstName, urlStr string) error { + storage, dstDirActualPath, err := op.GetStorageAndActualPath(path) + if err != nil { + return errors.WithMessage(err, "failed get storage") + } + if storage.Config().NoUpload { + return errors.WithStack(errs.UploadNotSupported) + } + _, ok := storage.(driver.PutURL) + _, okResult := storage.(driver.PutURLResult) + if !ok && !okResult { + return errs.NotImplement + } + return op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr) +} diff --git a/internal/fs/put.go b/internal/fs/put.go index 807b15e0..bc33a3ac 100644 --- a/internal/fs/put.go +++ b/internal/fs/put.go @@ -7,12 +7,14 @@ import ( "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/task" "github.com/pkg/errors" "github.com/xhofe/tache" + "time" ) type UploadTask struct { - tache.Base + task.TaskExtension storage driver.Driver dstDirActualPath string file model.FileStreamer @@ -27,13 +29,16 @@ func (t *UploadTask) GetStatus() string { } func (t *UploadTask) Run() error { + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() return op.Put(t.Ctx(), t.storage, t.dstDirActualPath, t.file, t.SetProgress, true) } 
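Every task type in this patch repeats the same bookkeeping at the top of Run: clear any end time left over from a previous attempt, stamp the start, and always stamp the end on the way out. A minimal sketch of that pattern as a standalone helper (hypothetical; the patch deliberately inlines these calls in each task type):

	import (
		"time"

		"github.com/alist-org/alist/v3/internal/task"
	)

	// runTimed wraps a task body with the start/end bookkeeping used by the Run
	// methods in this patch. ClearEndTime matters for retried tasks, which would
	// otherwise keep the end time of the failed attempt.
	func runTimed(t *task.TaskExtension, body func() error) error {
		t.ClearEndTime()
		t.SetStartTime(time.Now())
		defer func() { t.SetEndTime(time.Now()) }()
		return body()
	}

Task types that are persisted and re-run (CopyTask, DownloadTask, ArchiveContentUploadTask) additionally call ReinitCtx() first so a restored task gets a fresh, uncancelled context.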
var UploadTaskManager *tache.Manager[*UploadTask] // putAsTask add as a put task and return immediately -func putAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, error) { +func putAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskExtensionInfo, error) { storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath) if err != nil { return nil, errors.WithMessage(err, "failed get storage") @@ -49,11 +54,16 @@ func putAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, //file.SetReader(tempFile) //file.SetTmpFile(tempFile) } + taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed t := &UploadTask{ + TaskExtension: task.TaskExtension{ + Creator: taskCreator, + }, storage: storage, dstDirActualPath: dstDirActualPath, file: file, } + t.SetTotalBytes(file.GetSize()) UploadTaskManager.Add(t) return t, nil } diff --git a/internal/model/archive.go b/internal/model/archive.go new file mode 100644 index 00000000..01b83691 --- /dev/null +++ b/internal/model/archive.go @@ -0,0 +1,53 @@ +package model + +import "time" + +type ObjTree interface { + Obj + GetChildren() []ObjTree +} + +type ObjectTree struct { + Object + Children []ObjTree +} + +func (t *ObjectTree) GetChildren() []ObjTree { + return t.Children +} + +type ArchiveMeta interface { + GetComment() string + // IsEncrypted means if the content of the archive requires a password to access + // GetArchiveMeta should return errs.WrongArchivePassword if the meta-info is also encrypted, + // and the provided password is empty. + IsEncrypted() bool + // GetTree directly returns the full folder structure + // returns nil if the folder structure should be acquired by calling driver.ArchiveReader.ListArchive + GetTree() []ObjTree +} + +type ArchiveMetaInfo struct { + Comment string + Encrypted bool + Tree []ObjTree +} + +func (m *ArchiveMetaInfo) GetComment() string { + return m.Comment +} + +func (m *ArchiveMetaInfo) IsEncrypted() bool { + return m.Encrypted +} + +func (m *ArchiveMetaInfo) GetTree() []ObjTree { + return m.Tree +} + +type ArchiveMetaProvider struct { + ArchiveMeta + *Sort + DriverProviding bool + Expiration *time.Duration +} diff --git a/internal/model/args.go b/internal/model/args.go index 613699b9..f29c7e45 100644 --- a/internal/model/args.go +++ b/internal/model/args.go @@ -17,10 +17,11 @@ type ListArgs struct { } type LinkArgs struct { - IP string - Header http.Header - Type string - HttpReq *http.Request + IP string + Header http.Header + Type string + HttpReq *http.Request + Redirect bool } type Link struct { @@ -48,6 +49,33 @@ type FsOtherArgs struct { Method string `json:"method" form:"method"` Data interface{} `json:"data" form:"data"` } + +type ArchiveArgs struct { + Password string + LinkArgs +} + +type ArchiveInnerArgs struct { + ArchiveArgs + InnerPath string +} + +type ArchiveMetaArgs struct { + ArchiveArgs + Refresh bool +} + +type ArchiveListArgs struct { + ArchiveInnerArgs + Refresh bool +} + +type ArchiveDecompressArgs struct { + ArchiveInnerArgs + CacheFull bool + PutIntoNewDir bool +} + type RangeReadCloserIF interface { RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) utils.ClosersIF @@ -60,7 +88,7 @@ type RangeReadCloser struct { utils.Closers } -func (r RangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { +func (r *RangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { 
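+	// Note the switch to a pointer receiver: Closers.Add below appends to the
+	// receiver's closer list, and with the previous value receiver that append
+	// landed on a copy, so the ReadCloser was never tracked for a later Close.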
rc, err := r.RangeReader(ctx, httpRange) r.Closers.Add(rc) return rc, err diff --git a/internal/model/obj.go b/internal/model/obj.go index 122fb546..f0fce7a1 100644 --- a/internal/model/obj.go +++ b/internal/model/obj.go @@ -2,6 +2,7 @@ package model import ( "io" + "os" "sort" "strings" "time" @@ -48,8 +49,12 @@ type FileStreamer interface { RangeRead(http_range.Range) (io.Reader, error) //for a non-seekable Stream, if Read is called, this function won't work CacheFullInTempFile() (File, error) + SetTmpFile(r *os.File) + GetFile() File } +type UpdateProgress func(percentage float64) + type URL interface { URL() string } @@ -112,12 +117,12 @@ func ExtractFolder(objs []Obj, extractFolder string) { } func WrapObjName(objs Obj) Obj { - return &ObjWrapName{Obj: objs} + return &ObjWrapName{Name: utils.MappingName(objs.GetName()), Obj: objs} } func WrapObjsName(objs []Obj) { for i := 0; i < len(objs); i++ { - objs[i] = &ObjWrapName{Obj: objs[i]} + objs[i] = &ObjWrapName{Name: utils.MappingName(objs[i].GetName()), Obj: objs[i]} } } diff --git a/internal/model/object.go b/internal/model/object.go index 93f2c307..c8c10bb9 100644 --- a/internal/model/object.go +++ b/internal/model/object.go @@ -16,9 +16,6 @@ func (o *ObjWrapName) Unwrap() Obj { } func (o *ObjWrapName) GetName() string { - if o.Name == "" { - o.Name = utils.MappingName(o.Obj.GetName()) - } return o.Name } diff --git a/internal/model/setting.go b/internal/model/setting.go index c474935e..93b81fe5 100644 --- a/internal/model/setting.go +++ b/internal/model/setting.go @@ -11,6 +11,8 @@ const ( SSO LDAP S3 + FTP + TRAFFIC ) const ( diff --git a/internal/model/sshkey.go b/internal/model/sshkey.go new file mode 100644 index 00000000..6e97c103 --- /dev/null +++ b/internal/model/sshkey.go @@ -0,0 +1,28 @@ +package model + +import ( + "golang.org/x/crypto/ssh" + "time" +) + +type SSHPublicKey struct { + ID uint `json:"id" gorm:"primaryKey"` + UserId uint `json:"-"` + Title string `json:"title"` + Fingerprint string `json:"fingerprint"` + KeyStr string `gorm:"type:text" json:"-"` + AddedTime time.Time `json:"added_time"` + LastUsedTime time.Time `json:"last_used_time"` +} + +func (k *SSHPublicKey) GetKey() (ssh.PublicKey, error) { + pubKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k.KeyStr)) + if err != nil { + return nil, err + } + return pubKey, nil +} + +func (k *SSHPublicKey) UpdateLastUsedTime() { + k.LastUsedTime = time.Now() +} diff --git a/internal/model/storage.go b/internal/model/storage.go index 14bcf45f..e3c7e1f9 100644 --- a/internal/model/storage.go +++ b/internal/model/storage.go @@ -1,6 +1,8 @@ package model -import "time" +import ( + "time" +) type Storage struct { ID uint `json:"id" gorm:"primaryKey"` // unique key @@ -13,6 +15,7 @@ type Storage struct { Remark string `json:"remark"` Modified time.Time `json:"modified"` Disabled bool `json:"disabled"` // if disabled + DisableIndex bool `json:"disable_index"` EnableSign bool `json:"enable_sign"` Sort Proxy diff --git a/internal/model/user.go b/internal/model/user.go index 2d61a971..eaa0fed9 100644 --- a/internal/model/user.go +++ b/internal/model/user.go @@ -32,16 +32,20 @@ type User struct { Role int `json:"role"` // user's role Disabled bool `json:"disabled"` // Determine permissions by bit - // 0: can see hidden files - // 1: can access without password - // 2: can add offline download tasks - // 3: can mkdir and upload - // 4: can rename - // 5: can move - // 6: can copy - // 7: can remove - // 8: webdav read - // 9: webdav write + // 0: can see hidden files + // 1: 
can access without password + // 2: can add offline download tasks + // 3: can mkdir and upload + // 4: can rename + // 5: can move + // 6: can copy + // 7: can remove + // 8: webdav read + // 9: webdav write + // 10: ftp/sftp login and read + // 11: ftp/sftp write + // 12: can read archives + // 13: can decompress archives Permission int32 `json:"permission"` OtpSecret string `json:"-"` SsoID string `json:"sso_id"` // unique by sso platform @@ -78,43 +82,59 @@ func (u *User) SetPassword(pwd string) *User { } func (u *User) CanSeeHides() bool { - return u.IsAdmin() || u.Permission&1 == 1 + return u.Permission&1 == 1 } func (u *User) CanAccessWithoutPassword() bool { - return u.IsAdmin() || (u.Permission>>1)&1 == 1 + return (u.Permission>>1)&1 == 1 } func (u *User) CanAddOfflineDownloadTasks() bool { - return u.IsAdmin() || (u.Permission>>2)&1 == 1 + return (u.Permission>>2)&1 == 1 } func (u *User) CanWrite() bool { - return u.IsAdmin() || (u.Permission>>3)&1 == 1 + return (u.Permission>>3)&1 == 1 } func (u *User) CanRename() bool { - return u.IsAdmin() || (u.Permission>>4)&1 == 1 + return (u.Permission>>4)&1 == 1 } func (u *User) CanMove() bool { - return u.IsAdmin() || (u.Permission>>5)&1 == 1 + return (u.Permission>>5)&1 == 1 } func (u *User) CanCopy() bool { - return u.IsAdmin() || (u.Permission>>6)&1 == 1 + return (u.Permission>>6)&1 == 1 } func (u *User) CanRemove() bool { - return u.IsAdmin() || (u.Permission>>7)&1 == 1 + return (u.Permission>>7)&1 == 1 } func (u *User) CanWebdavRead() bool { - return u.IsAdmin() || (u.Permission>>8)&1 == 1 + return (u.Permission>>8)&1 == 1 } func (u *User) CanWebdavManage() bool { - return u.IsAdmin() || (u.Permission>>9)&1 == 1 + return (u.Permission>>9)&1 == 1 +} + +func (u *User) CanFTPAccess() bool { + return (u.Permission>>10)&1 == 1 +} + +func (u *User) CanFTPManage() bool { + return (u.Permission>>11)&1 == 1 +} + +func (u *User) CanReadArchives() bool { + return (u.Permission>>12)&1 == 1 +} + +func (u *User) CanDecompress() bool { + return (u.Permission>>13)&1 == 1 } func (u *User) JoinPath(reqPath string) (string, error) { diff --git a/internal/net/request.go b/internal/net/request.go index 088ff66a..a1ff6d20 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -4,15 +4,15 @@ import ( "bytes" "context" "fmt" - "github.com/alist-org/alist/v3/pkg/utils" "io" - "math" "net/http" "strconv" "strings" "sync" "time" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/pkg/http_range" "github.com/aws/aws-sdk-go/aws/awsutil" log "github.com/sirupsen/logrus" @@ -20,7 +20,7 @@ import ( // DefaultDownloadPartSize is the default range of bytes to get at a time when // using Download(). -const DefaultDownloadPartSize = 1024 * 1024 * 10 +const DefaultDownloadPartSize = utils.MB * 10 // DefaultDownloadConcurrency is the default number of goroutines to spin up // when using Download(). @@ -29,6 +29,8 @@ const DefaultDownloadConcurrency = 2 // DefaultPartBodyMaxRetries is the default number of retries to make when a part fails to download. 
const DefaultPartBodyMaxRetries = 3 +var DefaultConcurrencyLimit *ConcurrencyLimit + type Downloader struct { PartSize int @@ -43,15 +45,15 @@ type Downloader struct { //RequestParam HttpRequestParams HttpClient HttpRequestFunc + + *ConcurrencyLimit } type HttpRequestFunc func(ctx context.Context, params *HttpRequestParams) (*http.Response, error) func NewDownloader(options ...func(*Downloader)) *Downloader { - d := &Downloader{ - HttpClient: DefaultHttpRequestFunc, - PartSize: DefaultDownloadPartSize, + d := &Downloader{ // options that may be left unset PartBodyMaxRetries: DefaultPartBodyMaxRetries, - Concurrency: DefaultDownloadConcurrency, + ConcurrencyLimit: DefaultConcurrencyLimit, } for _, option := range options { option(d) @@ -73,16 +75,16 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo impl := downloader{params: &finalP, cfg: d, ctx: ctx} // Ensures we don't need nil checks later on - - impl.partBodyMaxRetries = d.PartBodyMaxRetries - + // required options if impl.cfg.Concurrency == 0 { impl.cfg.Concurrency = DefaultDownloadConcurrency } - if impl.cfg.PartSize == 0 { impl.cfg.PartSize = DefaultDownloadPartSize } + if impl.cfg.HttpClient == nil { + impl.cfg.HttpClient = DefaultHttpRequestFunc + } return impl.download() } @@ -90,7 +92,7 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo // downloader is the implementation structure used internally by Downloader. type downloader struct { ctx context.Context - cancel context.CancelFunc + cancel context.CancelCauseFunc cfg Downloader params *HttpRequestParams //http request params @@ -100,38 +102,78 @@ type downloader struct { m sync.Mutex nextChunk int //next chunk id - chunks []chunk bufs []*Buf - //totalBytes int64 - written int64 //total bytes of file downloaded from remote - err error + written int64 //total bytes of file downloaded from remote + err error - partBodyMaxRetries int + concurrency int //remaining concurrency budget, decremented toward 0; at 0 no new workers start + maxPart int //total number of parts + pos int64 + maxPos int64 + m2 sync.Mutex + readingID int // id of the chunk currently being read +} + +type ConcurrencyLimit struct { + _m sync.Mutex + Limit int // must be greater than 0 +} + +var ErrExceedMaxConcurrency = fmt.Errorf("ExceedMaxConcurrency") + +func (l *ConcurrencyLimit) sub() error { + l._m.Lock() + defer l._m.Unlock() + if l.Limit-1 < 0 { + return ErrExceedMaxConcurrency + } + l.Limit-- + // log.Debugf("ConcurrencyLimit.sub: %d", l.Limit) + return nil +} +func (l *ConcurrencyLimit) add() { + l._m.Lock() + defer l._m.Unlock() + l.Limit++ + // log.Debugf("ConcurrencyLimit.add: %d", l.Limit) +} + +// check whether the concurrency limit has been exceeded +func (d *downloader) concurrencyCheck() error { + if d.cfg.ConcurrencyLimit != nil { + return d.cfg.ConcurrencyLimit.sub() + } + return nil +} +func (d *downloader) concurrencyFinish() { + if d.cfg.ConcurrencyLimit != nil { + d.cfg.ConcurrencyLimit.add() + } } // download performs the implementation of the object download across ranged GETs.
func (d *downloader) download() (io.ReadCloser, error) { - d.ctx, d.cancel = context.WithCancel(d.ctx) + if err := d.concurrencyCheck(); err != nil { + return nil, err + } + d.ctx, d.cancel = context.WithCancelCause(d.ctx) - pos := d.params.Range.Start - maxPos := d.params.Range.Start + d.params.Range.Length - id := 0 - for pos < maxPos { - finalSize := int64(d.cfg.PartSize) - //check boundary - if pos+finalSize > maxPos { - finalSize = maxPos - pos - } - c := chunk{start: pos, size: finalSize, id: id} - d.chunks = append(d.chunks, c) - pos += finalSize - id++ + maxPart := int(d.params.Range.Length / int64(d.cfg.PartSize)) + if d.params.Range.Length%int64(d.cfg.PartSize) > 0 { + maxPart++ } - if len(d.chunks) < d.cfg.Concurrency { - d.cfg.Concurrency = len(d.chunks) + if maxPart < d.cfg.Concurrency { + d.cfg.Concurrency = maxPart } + log.Debugf("cfgConcurrency:%d", d.cfg.Concurrency) if d.cfg.Concurrency == 1 { + if d.cfg.ConcurrencyLimit != nil { + go func() { + <-d.ctx.Done() + d.concurrencyFinish() + }() + } resp, err := d.cfg.HttpClient(d.ctx, d.params) if err != nil { return nil, err @@ -142,60 +184,115 @@ func (d *downloader) download() (io.ReadCloser, error) { // workers d.chunkChannel = make(chan chunk, d.cfg.Concurrency) - for i := 0; i < d.cfg.Concurrency; i++ { - buf := NewBuf(d.ctx, d.cfg.PartSize, i) - d.bufs = append(d.bufs, buf) - go d.downloadPart() - } - // initial tasks - for i := 0; i < d.cfg.Concurrency; i++ { - d.sendChunkTask() - } + d.maxPart = maxPart + d.pos = d.params.Range.Start + d.maxPos = d.params.Range.Start + d.params.Range.Length + d.concurrency = d.cfg.Concurrency + d.sendChunkTask(true) - var rc io.ReadCloser = NewMultiReadCloser(d.chunks[0].buf, d.interrupt, d.finishBuf) + var rc io.ReadCloser = NewMultiReadCloser(d.bufs[0], d.interrupt, d.finishBuf) // Return error return rc, d.err } -func (d *downloader) sendChunkTask() *chunk { - ch := &d.chunks[d.nextChunk] - ch.buf = d.getBuf(d.nextChunk) - ch.buf.Reset(int(ch.size)) - d.chunkChannel <- *ch - d.nextChunk++ - return ch + +func (d *downloader) sendChunkTask(newConcurrency bool) error { + d.m.Lock() + defer d.m.Unlock() + isNewBuf := d.concurrency > 0 + if newConcurrency { + if d.concurrency <= 0 { + return nil + } + if d.nextChunk > 0 { // skip the check for the first chunk; download() already did it + if err := d.concurrencyCheck(); err != nil { + return err + } + } + d.concurrency-- + go d.downloadPart() + } + + var buf *Buf + if isNewBuf { + buf = NewBuf(d.ctx, d.cfg.PartSize) + d.bufs = append(d.bufs, buf) + } else { + buf = d.getBuf(d.nextChunk) + } + + if d.pos < d.maxPos { + finalSize := int64(d.cfg.PartSize) + switch d.nextChunk { + case 0: + // putting the smallest part first may help video playback start sooner?
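+ // Worked example: with PartSize = 10 MiB and Range.Length = 25 MiB, maxPart = 3 + // and the remainder is 5 MiB, exactly PartSize/2, so chunk 0 stays at 5 MiB and + // chunks 1-2 are 10 MiB each. With Range.Length = 23 MiB the 3 MiB remainder is + // below PartSize/2, so chunk 0 grows to 5 MiB and the case-1 branch below shrinks + // chunk 1 to 8 MiB to compensate (5 + 8 + 10 = 23).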
+ firstSize := d.params.Range.Length % finalSize + if firstSize > 0 { + minSize := finalSize / 2 + if firstSize < minSize { // if the first part would be too small, grow it to half a part + finalSize = minSize + } else { + finalSize = firstSize + } + } + case 1: + firstSize := d.params.Range.Length % finalSize + minSize := finalSize / 2 + if firstSize > 0 && firstSize < minSize { + finalSize += firstSize - minSize + } + } + buf.Reset(int(finalSize)) + ch := chunk{ + start: d.pos, + size: finalSize, + id: d.nextChunk, + buf: buf, + + newConcurrency: newConcurrency, + } + d.pos += finalSize + d.nextChunk++ + d.chunkChannel <- ch + return nil + } + return nil } // when the final reader Close, we interrupt func (d *downloader) interrupt() error { - d.cancel() if d.written != d.params.Range.Length { log.Debugf("Downloader interrupt before finish") if d.getErr() == nil { d.setErr(fmt.Errorf("interrupted")) } } + d.cancel(d.err) defer func() { close(d.chunkChannel) for _, buf := range d.bufs { buf.Close() } + if d.concurrency > 0 { + d.concurrency = -d.concurrency + } + log.Debugf("maxConcurrency:%d", d.cfg.Concurrency+d.concurrency) }() return d.err } func (d *downloader) getBuf(id int) (b *Buf) { - - return d.bufs[id%d.cfg.Concurrency] + return d.bufs[id%len(d.bufs)] } -func (d *downloader) finishBuf(id int) (isLast bool, buf *Buf) { - if id >= len(d.chunks)-1 { +func (d *downloader) finishBuf(id int) (isLast bool, nextBuf *Buf) { + id++ + if id >= d.maxPart { return true, nil } - if d.nextChunk > id+1 { - return false, d.getBuf(id + 1) - } - ch := d.sendChunkTask() - return false, ch.buf + + d.sendChunkTask(false) + + d.readingID = id + return false, d.getBuf(id) } // downloadPart is an individual goroutine worker reading from the ch channel @@ -210,58 +307,122 @@ func (d *downloader) downloadPart() { if d.getErr() != nil { // Drain the channel if there is an error, to prevent deadlocking // of download producer. - continue + break } - log.Debugf("downloadPart tried to get chunk") if err := d.downloadChunk(&c); err != nil { + if err == errCancelConcurrency { + break + } + if err == context.Canceled { + if e := context.Cause(d.ctx); e != nil { + err = e + } + } d.setErr(err) + d.cancel(err) } } + d.concurrencyFinish() } // downloadChunk downloads the chunk func (d *downloader) downloadChunk(ch *chunk) error { - log.Debugf("start new chunk %+v buffer_id =%d", ch, ch.id) + log.Debugf("start chunk_%d, %+v", ch.id, ch) + params := d.getParamsFromChunk(ch) var n int64 var err error - params := d.getParamsFromChunk(ch) - for retry := 0; retry <= d.partBodyMaxRetries; retry++ { + for retry := 0; retry <= d.cfg.PartBodyMaxRetries; retry++ { if d.getErr() != nil { - return d.getErr() + return nil } n, err = d.tryDownloadChunk(params, ch) if err == nil { + d.incrWritten(n) + log.Debugf("chunk_%d downloaded", ch.id) break } - // Check if the returned error is an errReadingBody. - // If err is errReadingBody this indicates that an error - // occurred while copying the http response body. + if d.getErr() != nil { + return nil + } + if utils.IsCanceled(d.ctx) { + return d.ctx.Err() + } + // Check if the returned error is an errNeedRetry. // If this occurs we unwrap the err to set the underlying error // and attempt any remaining retries.
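+ // On a retryable failure, bytes already copied into the buffer are kept: the + // range below is advanced past them (ch.start += n) so the retry only requests + // the remainder of the chunk instead of re-downloading it from the start.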
- if bodyErr, ok := err.(*errReadingBody); ok { - err = bodyErr.Unwrap() + if e, ok := err.(*errNeedRetry); ok { + err = e.Unwrap() + if n > 0 { + // tested by: cutting alist's download connection to the cloud drive mid-transfer + // verified by: the file hash still matching once the download completes + d.incrWritten(n) + ch.start += n + ch.size -= n + params.Range.Start = ch.start + params.Range.Length = ch.size + } + log.Warnf("err chunk_%d, object part download error %s, retrying attempt %d. %v", + ch.id, params.URL, retry, err) + } else if err == errInfiniteRetry { + retry-- + continue } else { - return err + break } - - //ch.cur = 0 - - log.Debugf("object part body download interrupted %s, err, %v, retrying attempt %d", - params.URL, err, retry) } - d.incrWritten(n) - log.Debugf("down_%d downloaded chunk", ch.id) - //ch.buf.buffer.wg1.Wait() - //log.Debugf("down_%d downloaded chunk,wg wait passed", ch.id) return err } -func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) { +var errCancelConcurrency = fmt.Errorf("cancel concurrency") +var errInfiniteRetry = fmt.Errorf("infinite retry") +func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) { resp, err := d.cfg.HttpClient(d.ctx, params) if err != nil { - return 0, err + if resp == nil { + return 0, err + } + if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable { + return 0, err + } + if ch.id == 0 { //first chunk: retry a limited number of times; once retries are exhausted the request fails + switch resp.StatusCode { + default: + return 0, err + case http.StatusTooManyRequests: + case http.StatusBadGateway: + case http.StatusServiceUnavailable: + case http.StatusGatewayTimeout: + } + <-time.After(time.Millisecond * 200) + return 0, &errNeedRetry{err: fmt.Errorf("http request failure,status: %d", resp.StatusCode)} + } + + // reaching this point means the first chunk connected successfully, + // so errors on later chunks are treated as server overload + log.Debugf("err chunk_%d, try downloading:%v", ch.id, err) + + d.m.Lock() + isCancelConcurrency := ch.newConcurrency + if d.concurrency > 0 { // cancel the remaining concurrent workers + // negated so the effective concurrency can still be computed + d.concurrency = -d.concurrency + isCancelConcurrency = true + } + if isCancelConcurrency { + d.concurrency-- + d.chunkChannel <- *ch + d.m.Unlock() + return 0, errCancelConcurrency + } + d.m.Unlock() + if ch.id != d.readingID { //the chunk currently being read gets priority to retry + d.m2.Lock() + defer d.m2.Unlock() + <-time.After(time.Millisecond * 200) + } + return 0, errInfiniteRetry } defer resp.Body.Close() //only check file size on the first task @@ -271,15 +432,15 @@ func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int return 0, err } } - + d.sendChunkTask(true) n, err := utils.CopyWithBuffer(ch.buf, resp.Body) if err != nil { - return n, &errReadingBody{err: err} + return n, &errNeedRetry{err: err} } if n != ch.size { err = fmt.Errorf("chunk download size incorrect, expected=%d, got=%d", ch.size, n) - return n, &errReadingBody{err: err} + return n, &errNeedRetry{err: err} } return n, nil @@ -295,7 +456,7 @@ func (d *downloader) getParamsFromChunk(ch *chunk) *HttpRequestParams { func (d *downloader) checkTotalBytes(resp *http.Response) error { var err error - var totalBytes int64 = math.MinInt64 + totalBytes := int64(-1) contentRange := resp.Header.Get("Content-Range") if len(contentRange) == 0 { // ContentRange is nil when the full file contents is provided, and @@ -327,8 +488,9 @@ func (d *downloader) checkTotalBytes(resp *http.Response) error { err = fmt.Errorf("expect file size=%d unmatch remote report size=%d, need refresh cache", d.params.Size, totalBytes) } if err != nil { - _ = d.interrupt() + // _ = d.interrupt() d.setErr(err) + d.cancel(err) } return err @@ -367,9 +529,7 @@ type chunk
struct { buf *Buf id int - // Downloader takes range (start,length), but this chunk is requesting equal/sub range of it. - // To convert the writer to reader eventually, we need to write within the boundary - //boundary http_range.Range + newConcurrency bool } func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*http.Response, error) { @@ -377,7 +537,7 @@ func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*ht res, err := RequestHttp(ctx, "GET", header, params.URL) if err != nil { - return nil, err + return res, err } return res, nil } @@ -390,15 +550,15 @@ type HttpRequestParams struct { //total file size Size int64 } -type errReadingBody struct { +type errNeedRetry struct { err error } -func (e *errReadingBody) Error() string { - return fmt.Sprintf("failed to read part body: %v", e.err) +func (e *errNeedRetry) Error() string { + return e.err.Error() } -func (e *errReadingBody) Unwrap() error { +func (e *errNeedRetry) Unwrap() error { return e.err } @@ -436,9 +596,13 @@ func (mr MultiReadCloser) Read(p []byte) (n int, err error) { } mr.cfg.curBuf = next mr.cfg.rPos++ - //current.Close() return n, nil } + if err == context.Canceled { + if e := context.Cause(mr.cfg.curBuf.ctx); e != nil { + err = e + } + } return n, err } func (mr MultiReadCloser) Close() error { @@ -451,18 +615,15 @@ type Buf struct { ctx context.Context off int rw sync.Mutex - //notify chan struct{} } // NewBuf is a buffer that can have 1 read & 1 write at the same time. // when read is faster than write, immediately feed data to read after written -func NewBuf(ctx context.Context, maxSize int, id int) *Buf { - d := make([]byte, 0, maxSize) +func NewBuf(ctx context.Context, maxSize int) *Buf { return &Buf{ ctx: ctx, - buffer: bytes.NewBuffer(d), + buffer: bytes.NewBuffer(make([]byte, 0, maxSize)), size: maxSize, - //notify: make(chan struct{}), } } func (br *Buf) Reset(size int) { @@ -500,8 +661,6 @@ func (br *Buf) Read(p []byte) (n int, err error) { select { case <-br.ctx.Done(): return 0, br.ctx.Err() - //case <-br.notify: - // return 0, nil case <-time.After(time.Millisecond * 200): return 0, nil } @@ -514,13 +673,9 @@ func (br *Buf) Write(p []byte) (n int, err error) { br.rw.Lock() defer br.rw.Unlock() n, err = br.buffer.Write(p) - select { - //case br.notify <- struct{}{}: - default: - } return } func (br *Buf) Close() { - //close(br.notify) + br.buffer = nil } diff --git a/internal/net/serve.go b/internal/net/serve.go index 0eb8cbb8..bdeac0ac 100644 --- a/internal/net/serve.go +++ b/internal/net/serve.go @@ -3,6 +3,7 @@ package net import ( "compress/gzip" "context" + "crypto/tls" "fmt" "io" "mime" @@ -14,7 +15,6 @@ import ( "sync" "time" - "github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/http_range" @@ -52,18 +52,19 @@ import ( // // If the caller has set w's ETag header formatted per RFC 7232, section 2.3, // ServeHTTP uses it to handle requests using If-Match, If-None-Match, or If-Range.
-func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time.Time, size int64, RangeReaderFunc model.RangeReaderFunc) { +func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time.Time, size int64, RangeReadCloser model.RangeReadCloserIF) error { + defer RangeReadCloser.Close() setLastModified(w, modTime) done, rangeReq := checkPreconditions(w, r, modTime) if done { - return + return nil } if size < 0 { // since too many functions need file size to work, // will not implement the support of unknown file size here http.Error(w, "negative content size not supported", http.StatusInternalServerError) - return + return nil } code := http.StatusOK @@ -102,7 +103,7 @@ fallthrough default: http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return + return nil } if sumRangesSize(ranges) > size { @@ -110,12 +111,20 @@ // or unknown file size, ignore the range request. ranges = nil } + + // use the request's Context + // otherwise, once the client disconnects, sendContent yields no data and CopyBuffer blocks forever + ctx := context.WithValue(r.Context(), "request_header", r.Header) switch { case len(ranges) == 0: - reader, err := RangeReaderFunc(context.Background(), http_range.Range{Length: -1}) + reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1}) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + code = http.StatusRequestedRangeNotSatisfiable + if err == ErrExceedMaxConcurrency { + code = http.StatusTooManyRequests + } + http.Error(w, err.Error(), code) + return nil } sendContent = reader case len(ranges) == 1: @@ -131,10 +140,14 @@ // does not request multiple parts might not support // multipart responses." ra := ranges[0] - sendContent, err = RangeReaderFunc(context.Background(), ra) + sendContent, err = RangeReadCloser.RangeRead(ctx, ra) if err != nil { - http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return + code = http.StatusRequestedRangeNotSatisfiable + if err == ErrExceedMaxConcurrency { + code = http.StatusTooManyRequests + } + http.Error(w, err.Error(), code) + return nil } sendSize = ra.Length code = http.StatusPartialContent @@ -158,7 +171,7 @@ pw.CloseWithError(err) return } - reader, err := RangeReaderFunc(context.Background(), ra) + reader, err := RangeReadCloser.RangeRead(ctx, ra) if err != nil { pw.CloseWithError(err) return @@ -167,14 +180,12 @@ pw.CloseWithError(err) return } - //defer reader.Close() } mw.Close() pw.Close() }() } - defer sendContent.Close() w.Header().Set("Accept-Ranges", "bytes") if w.Header().Get("Content-Encoding") == "" { @@ -190,9 +201,15 @@ if written != sendSize { log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish.
written bytes: %d ,sendSize:%d, ", written, sendSize) } - http.Error(w, err.Error(), http.StatusInternalServerError) + code = http.StatusInternalServerError + if err == ErrExceedMaxConcurrency { + code = http.StatusTooManyRequests + } + w.WriteHeader(code) + return err } } + return nil } func ProcessHeader(origin, override http.Header) http.Header { result := http.Header{} @@ -239,7 +256,7 @@ func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Hea _ = res.Body.Close() msg := string(all) log.Debugln(msg) - return nil, fmt.Errorf("http request [%s] failure,status: %d response:%s", URL, res.StatusCode, msg) + return res, fmt.Errorf("http request [%s] failure,status: %d response:%s", URL, res.StatusCode, msg) } return res, nil } @@ -249,7 +266,7 @@ var httpClient *http.Client func HttpClient() *http.Client { once.Do(func() { - httpClient = base.NewHttpClient() + httpClient = NewHttpClient() httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { if len(via) >= 10 { return errors.New("stopped after 10 redirects") @@ -260,3 +277,13 @@ func HttpClient() *http.Client { }) return httpClient } + +func NewHttpClient() *http.Client { + return &http.Client{ + Timeout: time.Hour * 48, + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify}, + }, + } +} diff --git a/internal/net/util.go b/internal/net/util.go index 44201859..5b335a7f 100644 --- a/internal/net/util.go +++ b/internal/net/util.go @@ -2,7 +2,6 @@ package net import ( "fmt" - "github.com/alist-org/alist/v3/pkg/utils" "io" "math" "mime/multipart" @@ -11,6 +10,8 @@ import ( "strings" "time" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/pkg/http_range" log "github.com/sirupsen/logrus" ) @@ -70,6 +71,7 @@ func checkIfMatch(w http.ResponseWriter, r *http.Request) condResult { if im == "" { return condNone } + r.Header.Del("If-Match") for { im = textproto.TrimString(im) if len(im) == 0 { @@ -97,7 +99,11 @@ func checkIfMatch(w http.ResponseWriter, r *http.Request) condResult { func checkIfUnmodifiedSince(r *http.Request, modtime time.Time) condResult { ius := r.Header.Get("If-Unmodified-Since") - if ius == "" || isZeroTime(modtime) { + if ius == "" { + return condNone + } + r.Header.Del("If-Unmodified-Since") + if isZeroTime(modtime) { return condNone } t, err := http.ParseTime(ius) @@ -119,6 +125,7 @@ func checkIfNoneMatch(w http.ResponseWriter, r *http.Request) condResult { if inm == "" { return condNone } + r.Header.Del("If-None-Match") buf := inm for { buf = textproto.TrimString(buf) @@ -149,7 +156,11 @@ func checkIfModifiedSince(r *http.Request, modtime time.Time) condResult { return condNone } ims := r.Header.Get("If-Modified-Since") - if ims == "" || isZeroTime(modtime) { + if ims == "" { + return condNone + } + r.Header.Del("If-Modified-Since") + if isZeroTime(modtime) { return condNone } t, err := http.ParseTime(ims) @@ -173,6 +184,7 @@ func checkIfRange(w http.ResponseWriter, r *http.Request, modtime time.Time) con if ir == "" { return condNone } + r.Header.Del("If-Range") etag, _ := scanETag(ir) if etag != "" { if etagStrongMatch(etag, w.Header().Get("Etag")) { diff --git a/internal/offline_download/115/client.go b/internal/offline_download/115/client.go index 0ebf38ff..3f9d804d 100644 --- a/internal/offline_download/115/client.go +++ b/internal/offline_download/115/client.go @@ -3,6 +3,8 @@ package _115 import ( "context" "fmt" + 
"github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/setting" "github.com/alist-org/alist/v3/drivers/115" "github.com/alist-org/alist/v3/internal/errs" @@ -33,13 +35,23 @@ func (p *Cloud115) Init() (string, error) { } func (p *Cloud115) IsReady() bool { + tempDir := setting.GetStr(conf.Pan115TempDir) + if tempDir == "" { + return false + } + storage, _, err := op.GetStorageAndActualPath(tempDir) + if err != nil { + return false + } + if _, ok := storage.(*_115.Pan115); !ok { + return false + } return true } func (p *Cloud115) AddURL(args *tool.AddUrlArgs) (string, error) { // 添加新任务刷新缓存 p.refreshTaskCache = true - // args.TempDir 已经被修改为了 DstDirPath storage, actualPath, err := op.GetStorageAndActualPath(args.TempDir) if err != nil { return "", err @@ -50,6 +62,11 @@ func (p *Cloud115) AddURL(args *tool.AddUrlArgs) (string, error) { } ctx := context.Background() + + if err := op.MakeDir(ctx, storage, actualPath); err != nil { + return "", err + } + parentDir, err := op.GetUnwrap(ctx, storage, actualPath) if err != nil { return "", err @@ -64,7 +81,7 @@ func (p *Cloud115) AddURL(args *tool.AddUrlArgs) (string, error) { } func (p *Cloud115) Remove(task *tool.DownloadTask) error { - storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + storage, _, err := op.GetStorageAndActualPath(task.TempDir) if err != nil { return err } @@ -81,7 +98,7 @@ func (p *Cloud115) Remove(task *tool.DownloadTask) error { } func (p *Cloud115) Status(task *tool.DownloadTask) (*tool.Status, error) { - storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + storage, _, err := op.GetStorageAndActualPath(task.TempDir) if err != nil { return nil, err } @@ -107,6 +124,7 @@ func (p *Cloud115) Status(task *tool.DownloadTask) (*tool.Status, error) { s.Progress = t.Percent s.Status = t.GetStatus() s.Completed = t.IsDone() + s.TotalBytes = t.Size if t.IsFailed() { s.Err = fmt.Errorf(t.GetStatus()) } diff --git a/internal/offline_download/all.go b/internal/offline_download/all.go index ee80b5a0..3d0c7c73 100644 --- a/internal/offline_download/all.go +++ b/internal/offline_download/all.go @@ -6,4 +6,6 @@ import ( _ "github.com/alist-org/alist/v3/internal/offline_download/http" _ "github.com/alist-org/alist/v3/internal/offline_download/pikpak" _ "github.com/alist-org/alist/v3/internal/offline_download/qbit" + _ "github.com/alist-org/alist/v3/internal/offline_download/thunder" + _ "github.com/alist-org/alist/v3/internal/offline_download/transmission" ) diff --git a/internal/offline_download/aria2/aria2.go b/internal/offline_download/aria2/aria2.go index d22b32f9..fb212b35 100644 --- a/internal/offline_download/aria2/aria2.go +++ b/internal/offline_download/aria2/aria2.go @@ -82,7 +82,7 @@ func (a *Aria2) Status(task *tool.DownloadTask) (*tool.Status, error) { if err != nil { return nil, err } - total, err := strconv.ParseUint(info.TotalLength, 10, 64) + total, err := strconv.ParseInt(info.TotalLength, 10, 64) if err != nil { total = 0 } @@ -91,8 +91,9 @@ func (a *Aria2) Status(task *tool.DownloadTask) (*tool.Status, error) { downloaded = 0 } s := &tool.Status{ - Completed: info.Status == "complete", - Err: err, + Completed: info.Status == "complete", + Err: err, + TotalBytes: total, } s.Progress = float64(downloaded) / float64(total) * 100 if len(info.FollowedBy) != 0 { diff --git a/internal/offline_download/http/client.go b/internal/offline_download/http/client.go index 6f22fcf7..9b83400e 100644 --- a/internal/offline_download/http/client.go +++ 
b/internal/offline_download/http/client.go @@ -83,6 +83,7 @@ func (s SimpleHttp) Run(task *tool.DownloadTask) error { } defer file.Close() fileSize := resp.ContentLength + task.SetTotalBytes(fileSize) err = utils.CopyWithCtx(task.Ctx(), file, resp.Body, fileSize, task.SetProgress) return err } diff --git a/internal/offline_download/pikpak/pikpak.go b/internal/offline_download/pikpak/pikpak.go index 618b1442..8fdfb340 100644 --- a/internal/offline_download/pikpak/pikpak.go +++ b/internal/offline_download/pikpak/pikpak.go @@ -3,6 +3,9 @@ package pikpak import ( "context" "fmt" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/setting" + "strconv" "github.com/alist-org/alist/v3/drivers/pikpak" "github.com/alist-org/alist/v3/internal/errs" @@ -16,7 +19,7 @@ type PikPak struct { } func (p *PikPak) Name() string { - return "pikpak" + return "PikPak" } func (p *PikPak) Items() []model.SettingItem { @@ -33,13 +36,23 @@ func (p *PikPak) Init() (string, error) { } func (p *PikPak) IsReady() bool { + tempDir := setting.GetStr(conf.PikPakTempDir) + if tempDir == "" { + return false + } + storage, _, err := op.GetStorageAndActualPath(tempDir) + if err != nil { + return false + } + if _, ok := storage.(*pikpak.PikPak); !ok { + return false + } return true } func (p *PikPak) AddURL(args *tool.AddUrlArgs) (string, error) { // refresh the task cache when a new task is added p.refreshTaskCache = true - // args.TempDir has already been rewritten to DstDirPath storage, actualPath, err := op.GetStorageAndActualPath(args.TempDir) if err != nil { return "", err @@ -50,6 +63,11 @@ func (p *PikPak) AddURL(args *tool.AddUrlArgs) (string, error) { } ctx := context.Background() + + if err := op.MakeDir(ctx, storage, actualPath); err != nil { + return "", err + } + parentDir, err := op.GetUnwrap(ctx, storage, actualPath) if err != nil { return "", err @@ -64,7 +82,7 @@ } func (p *PikPak) Remove(task *tool.DownloadTask) error { - storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + storage, _, err := op.GetStorageAndActualPath(task.TempDir) if err != nil { return err } @@ -81,7 +99,7 @@ } func (p *PikPak) Status(task *tool.DownloadTask) (*tool.Status, error) { - storage, _, err := op.GetStorageAndActualPath(task.DstDirPath) + storage, _, err := op.GetStorageAndActualPath(task.TempDir) if err != nil { return nil, err } @@ -105,6 +123,10 @@ s.Progress = float64(t.Progress) s.Status = t.Message s.Completed = (t.Phase == "PHASE_TYPE_COMPLETE") + s.TotalBytes, err = strconv.ParseInt(t.FileSize, 10, 64) + if err != nil { + s.TotalBytes = 0 + } if t.Phase == "PHASE_TYPE_ERROR" { s.Err = fmt.Errorf(t.Message) } diff --git a/internal/offline_download/qbit/qbit.go b/internal/offline_download/qbit/qbit.go index 807ebfef..458de03f 100644 --- a/internal/offline_download/qbit/qbit.go +++ b/internal/offline_download/qbit/qbit.go @@ -64,6 +64,7 @@ func (a *QBittorrent) Status(task *tool.DownloadTask) (*tool.Status, error) { return nil, err } s := &tool.Status{} + s.TotalBytes = info.Size + s.Progress = float64(info.Completed) / float64(info.Size) * 100 switch info.State { case qbittorrent.UPLOADING, qbittorrent.PAUSEDUP, qbittorrent.QUEUEDUP, qbittorrent.STALLEDUP, qbittorrent.FORCEDUP, qbittorrent.CHECKINGUP: diff --git a/internal/offline_download/thunder/thunder.go b/internal/offline_download/thunder/thunder.go new file mode 100644 index
00000000..81b94861 --- /dev/null +++ b/internal/offline_download/thunder/thunder.go @@ -0,0 +1,143 @@ +package thunder + +import ( + "context" + "errors" + "fmt" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/setting" + "strconv" + + "github.com/alist-org/alist/v3/drivers/thunder" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/offline_download/tool" + "github.com/alist-org/alist/v3/internal/op" +) + +type Thunder struct { + refreshTaskCache bool +} + +func (t *Thunder) Name() string { + return "Thunder" +} + +func (t *Thunder) Items() []model.SettingItem { + return nil +} + +func (t *Thunder) Run(task *tool.DownloadTask) error { + return errs.NotSupport +} + +func (t *Thunder) Init() (string, error) { + t.refreshTaskCache = false + return "ok", nil +} + +func (t *Thunder) IsReady() bool { + tempDir := setting.GetStr(conf.ThunderTempDir) + if tempDir == "" { + return false + } + storage, _, err := op.GetStorageAndActualPath(tempDir) + if err != nil { + return false + } + if _, ok := storage.(*thunder.Thunder); !ok { + return false + } + return true +} + +func (t *Thunder) AddURL(args *tool.AddUrlArgs) (string, error) { + // refresh the task cache when a new task is added + t.refreshTaskCache = true + storage, actualPath, err := op.GetStorageAndActualPath(args.TempDir) + if err != nil { + return "", err + } + thunderDriver, ok := storage.(*thunder.Thunder) + if !ok { + return "", fmt.Errorf("unsupported storage driver for offline download, only Thunder is supported") + } + + ctx := context.Background() + + if err := op.MakeDir(ctx, storage, actualPath); err != nil { + return "", err + } + + parentDir, err := op.GetUnwrap(ctx, storage, actualPath) + if err != nil { + return "", err + } + + task, err := thunderDriver.OfflineDownload(ctx, args.Url, parentDir, "") + if err != nil { + return "", fmt.Errorf("failed to add offline download task: %w", err) + } + + return task.ID, nil +} + +func (t *Thunder) Remove(task *tool.DownloadTask) error { + storage, _, err := op.GetStorageAndActualPath(task.TempDir) + if err != nil { + return err + } + thunderDriver, ok := storage.(*thunder.Thunder) + if !ok { + return fmt.Errorf("unsupported storage driver for offline download, only Thunder is supported") + } + ctx := context.Background() + err = thunderDriver.DeleteOfflineTasks(ctx, []string{task.GID}, false) + if err != nil { + return err + } + return nil +} + +func (t *Thunder) Status(task *tool.DownloadTask) (*tool.Status, error) { + storage, _, err := op.GetStorageAndActualPath(task.TempDir) + if err != nil { + return nil, err + } + thunderDriver, ok := storage.(*thunder.Thunder) + if !ok { + return nil, fmt.Errorf("unsupported storage driver for offline download, only Thunder is supported") + } + tasks, err := t.GetTasks(thunderDriver) + if err != nil { + return nil, err + } + s := &tool.Status{ + Progress: 0, + NewGID: "", + Completed: false, + Status: "the task has been deleted", + Err: nil, + } + for _, t := range tasks { + if t.ID == task.GID { + s.Progress = float64(t.Progress) + s.Status = t.Message + s.Completed = (t.Phase == "PHASE_TYPE_COMPLETE") + s.TotalBytes, err = strconv.ParseInt(t.FileSize, 10, 64) + if err != nil { + s.TotalBytes = 0 + } + if t.Phase == "PHASE_TYPE_ERROR" { + s.Err = errors.New(t.Message) + } + return s, nil + } + } + s.Err = fmt.Errorf("the task has been deleted") + return s, nil +} + +func init() { + tool.Tools.Add(&Thunder{}) +} diff --git
a/internal/offline_download/thunder/util.go b/internal/offline_download/thunder/util.go new file mode 100644 index 00000000..ea400f32 --- /dev/null +++ b/internal/offline_download/thunder/util.go @@ -0,0 +1,42 @@ +package thunder + +import ( + "context" + "time" + + "github.com/Xhofe/go-cache" + "github.com/alist-org/alist/v3/drivers/thunder" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/pkg/singleflight" +) + +var taskCache = cache.NewMemCache(cache.WithShards[[]thunder.OfflineTask](16)) +var taskG singleflight.Group[[]thunder.OfflineTask] + +func (t *Thunder) GetTasks(thunderDriver *thunder.Thunder) ([]thunder.OfflineTask, error) { + key := op.Key(thunderDriver, "/drive/v1/task") + if !t.refreshTaskCache { + if tasks, ok := taskCache.Get(key); ok { + return tasks, nil + } + } + t.refreshTaskCache = false + tasks, err, _ := taskG.Do(key, func() ([]thunder.OfflineTask, error) { + ctx := context.Background() + tasks, err := thunderDriver.OfflineList(ctx, "") + if err != nil { + return nil, err + } + // cache the result for 10s + if len(tasks) > 0 { + taskCache.Set(key, tasks, cache.WithEx[[]thunder.OfflineTask](time.Second*10)) + } else { + taskCache.Del(key) + } + return tasks, nil + }) + if err != nil { + return nil, err + } + return tasks, nil +} diff --git a/internal/offline_download/tool/add.go b/internal/offline_download/tool/add.go index c7c5c781..d64e43e8 100644 --- a/internal/offline_download/tool/add.go +++ b/internal/offline_download/tool/add.go @@ -2,14 +2,22 @@ package tool import ( "context" + "net/url" + stdpath "path" "path/filepath" + _115 "github.com/alist-org/alist/v3/drivers/115" + "github.com/alist-org/alist/v3/drivers/pikpak" + "github.com/alist-org/alist/v3/drivers/thunder" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/internal/task" "github.com/google/uuid" "github.com/pkg/errors" - "github.com/xhofe/tache" ) type DeletePolicy string @@ -28,19 +36,7 @@ type AddURLArgs struct { DeletePolicy DeletePolicy } -func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) { - // get tool - tool, err := Tools.Get(args.Tool) - if err != nil { - return nil, errors.Wrapf(err, "failed get tool") - } - // check tool is ready - if !tool.IsReady() { - // try to init tool - if _, err := tool.Init(); err != nil { - return nil, errors.Wrapf(err, "failed init tool %s", args.Tool) - } - } +func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, error) { // check storage storage, dstDirActualPath, err := op.GetStorageAndActualPath(args.DstDirPath) if err != nil { @@ -62,23 +58,58 @@ func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) { return nil, errors.WithStack(errs.NotFolder) } } + // try putting url + if args.Tool == "SimpleHttp" { + err = tryPutUrl(ctx, args.DstDirPath, args.URL) + if err == nil || !errors.Is(err, errs.NotImplement) { + return nil, err + } + } + + // get tool + tool, err := Tools.Get(args.Tool) + if err != nil { + return nil, errors.Wrapf(err, "failed get tool") + } + // check tool is ready + if !tool.IsReady() { + // try to init tool + if _, err := tool.Init(); err != nil { + return nil, errors.Wrapf(err, "failed init tool %s", args.Tool) + } + } uid := uuid.NewString() tempDir := filepath.Join(conf.Conf.TempDir,
diff --git a/internal/offline_download/tool/add.go b/internal/offline_download/tool/add.go index c7c5c781..d64e43e8 100644 --- a/internal/offline_download/tool/add.go +++ b/internal/offline_download/tool/add.go @@ -2,14 +2,22 @@ package tool import ( "context" + "net/url" + stdpath "path" "path/filepath" + _115 "github.com/alist-org/alist/v3/drivers/115" + "github.com/alist-org/alist/v3/drivers/pikpak" + "github.com/alist-org/alist/v3/drivers/thunder" "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/internal/task" "github.com/google/uuid" "github.com/pkg/errors" - "github.com/xhofe/tache" ) type DeletePolicy string @@ -28,19 +36,7 @@ type AddURLArgs struct { DeletePolicy DeletePolicy } -func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) { - // get tool - tool, err := Tools.Get(args.Tool) - if err != nil { - return nil, errors.Wrapf(err, "failed get tool") - } - // check tool is ready - if !tool.IsReady() { - // try to init tool - if _, err := tool.Init(); err != nil { - return nil, errors.Wrapf(err, "failed init tool %s", args.Tool) - } - } +func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, error) { // check storage storage, dstDirActualPath, err := op.GetStorageAndActualPath(args.DstDirPath) if err != nil { @@ -62,23 +58,58 @@ func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) { return nil, errors.WithStack(errs.NotFolder) } } + // try putting url + if args.Tool == "SimpleHttp" { + err = tryPutUrl(ctx, args.DstDirPath, args.URL) + if err == nil || !errors.Is(err, errs.NotImplement) { + return nil, err + } + } + + // get tool + tool, err := Tools.Get(args.Tool) + if err != nil { + return nil, errors.Wrapf(err, "failed get tool") + } + // check tool is ready + if !tool.IsReady() { + // try to init tool + if _, err := tool.Init(); err != nil { + return nil, errors.Wrapf(err, "failed init tool %s", args.Tool) + } + } uid := uuid.NewString() tempDir := filepath.Join(conf.Conf.TempDir, args.Tool, uid) deletePolicy := args.DeletePolicy + // if the current storage is the matching cloud drive, download directly to the destination path; no transfer is needed switch args.Tool { case "115 Cloud": - tempDir = args.DstDirPath - // prevent the downloaded files from being deleted - deletePolicy = DeleteNever - case "pikpak": - tempDir = args.DstDirPath - // prevent the downloaded files from being deleted - deletePolicy = DeleteNever + if _, ok := storage.(*_115.Pan115); ok { + tempDir = args.DstDirPath + } else { + tempDir = filepath.Join(setting.GetStr(conf.Pan115TempDir), uid) + } + case "PikPak": + if _, ok := storage.(*pikpak.PikPak); ok { + tempDir = args.DstDirPath + } else { + tempDir = filepath.Join(setting.GetStr(conf.PikPakTempDir), uid) + } + case "Thunder": + if _, ok := storage.(*thunder.Thunder); ok { + tempDir = args.DstDirPath + } else { + tempDir = filepath.Join(setting.GetStr(conf.ThunderTempDir), uid) + } } - + + taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when the type assertion fails t := &DownloadTask{ + TaskExtension: task.TaskExtension{ + Creator: taskCreator, + }, Url: args.URL, DstDirPath: args.DstDirPath, TempDir: tempDir, @@ -89,3 +120,14 @@ func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) { DownloadTaskManager.Add(t) return t, nil } + +func tryPutUrl(ctx context.Context, path, urlStr string) error { + var dstName string + u, err := url.Parse(urlStr) + if err == nil { + dstName = stdpath.Base(u.Path) + } else { + dstName = "UnnamedURL" + } + return fs.PutURL(ctx, path, dstName, urlStr) +} diff --git a/internal/offline_download/tool/all_test.go b/internal/offline_download/tool/all_test.go deleted file mode 100644 index 27da5e32..00000000 --- a/internal/offline_download/tool/all_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package tool_test - -import ( - "testing" - - "github.com/alist-org/alist/v3/internal/offline_download/tool" -) - -func TestGetFiles(t *testing.T) { - files, err := tool.GetFiles("..") - if err != nil { - t.Fatal(err) - } - for _, file := range files { - t.Log(file.Name, file.Size, file.Path, file.Modified) - } -} diff --git a/internal/offline_download/tool/base.go b/internal/offline_download/tool/base.go index 3b9fb07a..b14169f8 100644 --- a/internal/offline_download/tool/base.go +++ b/internal/offline_download/tool/base.go @@ -1,10 +1,6 @@ package tool import ( - "io" - "os" - "time" - "github.com/alist-org/alist/v3/internal/model" ) @@ -16,11 +12,12 @@ type AddUrlArgs struct { } type Status struct { - Progress float64 - NewGID string - Completed bool - Status string - Err error + TotalBytes int64 + Progress float64 + NewGID string + Completed bool + Status string + Err error } type Tool interface { @@ -39,28 +36,3 @@ type Tool interface { // Run for simple http download Run(task *DownloadTask) error } - -type GetFileser interface { - // GetFiles return the files of the download task, if nil, means walk the temp dir to get the files - GetFiles(task *DownloadTask) []File -} - -type File struct { - // ReadCloser for http client - ReadCloser io.ReadCloser - Name string - Size int64 - Path string - Modified time.Time -} - -func (f *File) GetReadCloser() (io.ReadCloser, error) { - if f.ReadCloser != nil { - return f.ReadCloser, nil - } - file, err := os.Open(f.Path) - if err != nil { - return nil, err - } - return file, nil -}
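tryPutUrl is the new fast path for SimpleHttp: when the destination driver implements the PutURL capability added to internal/op/fs.go further down, the URL is handed to the driver directly and no download task is created. The destination name falls back to "UnnamedURL" only when url.Parse fails; otherwise it is the last path segment. A standalone illustration of that naming rule (the helper name is mine, not part of the PR):

package main

import (
	"fmt"
	"net/url"
	stdpath "path"
)

// nameFromURL mirrors tryPutUrl's naming: last URL path segment,
// or a fixed fallback when the URL cannot be parsed at all.
func nameFromURL(raw string) string {
	u, err := url.Parse(raw)
	if err != nil {
		return "UnnamedURL"
	}
	return stdpath.Base(u.Path)
}

func main() {
	fmt.Println(nameFromURL("https://example.com/dir/movie.mkv")) // "movie.mkv"
	fmt.Println(nameFromURL("https://example.com/"))              // "/" (stdpath.Base of "/")
}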
"github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/internal/task" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/xhofe/tache" ) type DownloadTask struct { - tache.Base + task.TaskExtension Url string `json:"url"` DstDirPath string `json:"dst_dir_path"` TempDir string `json:"temp_dir"` @@ -27,6 +28,10 @@ type DownloadTask struct { } func (t *DownloadTask) Run() error { + t.ReinitCtx() + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() if t.tool == nil { tool, err := Tools.Get(t.Toolname) if err != nil { @@ -36,7 +41,7 @@ func (t *DownloadTask) Run() error { } if err := t.tool.Run(t); !errs.IsNotSupportError(err) { if err == nil { - return t.Complete() + return t.Transfer() } return err } @@ -76,7 +81,10 @@ outer: if err != nil { return err } - if t.tool.Name() == "pikpak" { + if t.tool.Name() == "Pikpak" { + return nil + } + if t.tool.Name() == "Thunder" { return nil } if t.tool.Name() == "115 Cloud" { @@ -101,6 +109,19 @@ outer: } } } + + if t.tool.Name() == "Transmission" { + // hack for transmission + seedTime := setting.GetInt(conf.TransmissionSeedtime, 0) + if seedTime >= 0 { + t.Status = "offline download completed, waiting for seeding" + <-time.After(time.Minute * time.Duration(seedTime)) + err := t.tool.Remove(t) + if err != nil { + log.Errorln(err.Error()) + } + } + } return nil } @@ -117,6 +138,7 @@ func (t *DownloadTask) Update() (bool, error) { } t.callStatusRetried = 0 t.SetProgress(info.Progress) + t.SetTotalBytes(info.TotalBytes) t.Status = fmt.Sprintf("[%s]: %s", t.tool.Name(), info.Status) if info.NewGID != "" { log.Debugf("followen by: %+v", info.NewGID) @@ -125,7 +147,7 @@ func (t *DownloadTask) Update() (bool, error) { } // if download completed if info.Completed { - err := t.Complete() + err := t.Transfer() return true, errors.WithMessage(err, "failed to transfer file") } // if download failed @@ -135,37 +157,16 @@ func (t *DownloadTask) Update() (bool, error) { return false, nil } -func (t *DownloadTask) Complete() error { - var ( - files []File - err error - ) - if t.tool.Name() == "pikpak" { - return nil - } - if t.tool.Name() == "115 Cloud" { - return nil - } - if getFileser, ok := t.tool.(GetFileser); ok { - files = getFileser.GetFiles(t) - } else { - files, err = GetFiles(t.TempDir) - if err != nil { - return errors.Wrapf(err, "failed to get files") +func (t *DownloadTask) Transfer() error { + toolName := t.tool.Name() + if toolName == "115 Cloud" || toolName == "PikPak" || toolName == "Thunder" { + // 如果不是直接下载到目标路径,则进行转存 + if t.TempDir != t.DstDirPath { + return transferObj(t.Ctx(), t.TempDir, t.DstDirPath, t.DeletePolicy) } + return nil } - // upload files - for i := range files { - file := files[i] - TransferTaskManager.Add(&TransferTask{ - file: file, - DstDirPath: t.DstDirPath, - TempDir: t.TempDir, - DeletePolicy: t.DeletePolicy, - FileDir: file.Path, - }) - } - return nil + return transferStd(t.Ctx(), t.TempDir, t.DstDirPath, t.DeletePolicy) } func (t *DownloadTask) GetName() string { diff --git a/internal/offline_download/tool/tools.go b/internal/offline_download/tool/tools.go index 9de7d526..4a31ac7f 100644 --- a/internal/offline_download/tool/tools.go +++ b/internal/offline_download/tool/tools.go @@ -3,6 +3,7 @@ package tool import ( "fmt" "github.com/alist-org/alist/v3/internal/model" + "sort" ) var ( @@ -25,8 +26,11 @@ func (t ToolsManager) Add(tool Tool) { func (t ToolsManager) Names() []string { names := 
diff --git a/internal/offline_download/tool/tools.go index 9de7d526..4a31ac7f 100644 --- a/internal/offline_download/tool/tools.go +++ b/internal/offline_download/tool/tools.go @@ -3,6 +3,7 @@ package tool import ( "fmt" "github.com/alist-org/alist/v3/internal/model" + "sort" ) var ( @@ -25,8 +26,11 @@ func (t ToolsManager) Add(tool Tool) { func (t ToolsManager) Names() []string { names := make([]string, 0, len(t)) for name := range t { - names = append(names, name) + if tool, err := t.Get(name); err == nil && tool.IsReady() { + names = append(names, name) + } } + sort.Strings(names) return names } diff --git a/internal/offline_download/tool/transfer.go index 3744c7b5..1d5ece61 100644 --- a/internal/offline_download/tool/transfer.go +++ b/internal/offline_download/tool/transfer.go @@ -1,88 +1,72 @@ package tool import ( + "context" "fmt" - "os" - "path/filepath" - + "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/internal/task" "github.com/alist-org/alist/v3/pkg/utils" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/xhofe/tache" + "net/http" + "os" + stdpath "path" + "path/filepath" + "time" ) type TransferTask struct { - tache.Base - FileDir string `json:"file_dir"` - DstDirPath string `json:"dst_dir_path"` - TempDir string `json:"temp_dir"` - DeletePolicy DeletePolicy `json:"delete_policy"` - file File + task.TaskExtension + Status string `json:"-"` // don't save status, to save space + SrcObjPath string `json:"src_obj_path"` + DstDirPath string `json:"dst_dir_path"` + SrcStorage driver.Driver `json:"-"` + DstStorage driver.Driver `json:"-"` + SrcStorageMp string `json:"src_storage_mp"` + DstStorageMp string `json:"dst_storage_mp"` + DeletePolicy DeletePolicy `json:"delete_policy"` } func (t *TransferTask) Run() error { - // check dstDir again - var err error - if (t.file == File{}) { - t.file, err = GetFile(t.FileDir) - if err != nil { - return errors.Wrapf(err, "failed to get file %s", t.FileDir) - } + t.ReinitCtx() + t.ClearEndTime() + t.SetStartTime(time.Now()) + defer func() { t.SetEndTime(time.Now()) }() + if t.SrcStorage == nil { + return transferStdPath(t) + } else { + return transferObjPath(t) } - storage, dstDirActualPath, err := op.GetStorageAndActualPath(t.DstDirPath) - if err != nil { - return errors.WithMessage(err, "failed get storage") - } - mimetype := utils.GetMimeType(t.file.Path) - rc, err := t.file.GetReadCloser() - if err != nil { - return errors.Wrapf(err, "failed to open file %s", t.file.Path) - } - s := &stream.FileStream{ - Ctx: nil, - Obj: &model.Object{ - Name: filepath.Base(t.file.Path), - Size: t.file.Size, - Modified: t.file.Modified, - IsFolder: false, - }, - Reader: rc, - Mimetype: mimetype, - Closers: utils.NewClosers(rc), - } - relDir, err := filepath.Rel(t.TempDir, filepath.Dir(t.file.Path)) - if err != nil { - log.Errorf("find relation directory error: %v", err) - } - newDistDir := filepath.Join(dstDirActualPath, relDir) - return op.Put(t.Ctx(), storage, newDistDir, s, t.SetProgress) } func (t *TransferTask) GetName() string { - return fmt.Sprintf("transfer %s to [%s]", t.file.Path, t.DstDirPath) + return fmt.Sprintf("transfer [%s](%s) to [%s](%s)", t.SrcStorageMp, t.SrcObjPath, t.DstStorageMp, t.DstDirPath) } func (t *TransferTask) GetStatus() string { - return "transferring" + return t.Status } func (t *TransferTask) OnSucceeded() { if t.DeletePolicy == DeleteOnUploadSucceed || t.DeletePolicy == DeleteAlways { - err := os.Remove(t.file.Path) - if err != nil { - log.Errorf("failed to delete file %s, error: %s", t.file.Path, err.Error()) + if t.SrcStorage == nil { + removeStdTemp(t) + } else { + removeObjTemp(t) } } } func (t *TransferTask) OnFailed() { if t.DeletePolicy == DeleteOnUploadFailed ||
t.DeletePolicy == DeleteAlways { - err := os.Remove(t.file.Path) - if err != nil { - log.Errorf("failed to delete file %s, error: %s", t.file.Path, err.Error()) + if t.SrcStorage == nil { + removeStdTemp(t) + } else { + removeObjTemp(t) } } } @@ -90,3 +74,202 @@ func (t *TransferTask) OnFailed() { var ( TransferTaskManager *tache.Manager[*TransferTask] ) + +func transferStd(ctx context.Context, tempDir, dstDirPath string, deletePolicy DeletePolicy) error { + dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath) + if err != nil { + return errors.WithMessage(err, "failed get dst storage") + } + entries, err := os.ReadDir(tempDir) + if err != nil { + return err + } + taskCreator, _ := ctx.Value("user").(*model.User) + for _, entry := range entries { + t := &TransferTask{ + TaskExtension: task.TaskExtension{ + Creator: taskCreator, + }, + SrcObjPath: stdpath.Join(tempDir, entry.Name()), + DstDirPath: dstDirActualPath, + DstStorage: dstStorage, + DstStorageMp: dstStorage.GetStorage().MountPath, + DeletePolicy: deletePolicy, + } + TransferTaskManager.Add(t) + } + return nil +} + +func transferStdPath(t *TransferTask) error { + t.Status = "getting src object" + info, err := os.Stat(t.SrcObjPath) + if err != nil { + return err + } + if info.IsDir() { + t.Status = "src object is dir, listing objs" + entries, err := os.ReadDir(t.SrcObjPath) + if err != nil { + return err + } + for _, entry := range entries { + srcRawPath := stdpath.Join(t.SrcObjPath, entry.Name()) + dstObjPath := stdpath.Join(t.DstDirPath, info.Name()) + t := &TransferTask{ + TaskExtension: task.TaskExtension{ + Creator: t.Creator, + }, + SrcObjPath: srcRawPath, + DstDirPath: dstObjPath, + DstStorage: t.DstStorage, + SrcStorageMp: t.SrcStorageMp, + DstStorageMp: t.DstStorageMp, + DeletePolicy: t.DeletePolicy, + } + TransferTaskManager.Add(t) + } + t.Status = "src object is dir, added all transfer tasks of files" + return nil + } + return transferStdFile(t) +} + +func transferStdFile(t *TransferTask) error { + rc, err := os.Open(t.SrcObjPath) + if err != nil { + return errors.Wrapf(err, "failed to open file %s", t.SrcObjPath) + } + info, err := rc.Stat() + if err != nil { + return errors.Wrapf(err, "failed to get file %s", t.SrcObjPath) + } + mimetype := utils.GetMimeType(t.SrcObjPath) + s := &stream.FileStream{ + Ctx: nil, + Obj: &model.Object{ + Name: filepath.Base(t.SrcObjPath), + Size: info.Size(), + Modified: info.ModTime(), + IsFolder: false, + }, + Reader: rc, + Mimetype: mimetype, + Closers: utils.NewClosers(rc), + } + t.SetTotalBytes(info.Size()) + return op.Put(t.Ctx(), t.DstStorage, t.DstDirPath, s, t.SetProgress) +} + +func removeStdTemp(t *TransferTask) { + info, err := os.Stat(t.SrcObjPath) + if err != nil || info.IsDir() { + return + } + if err := os.Remove(t.SrcObjPath); err != nil { + log.Errorf("failed to delete temp file %s, error: %s", t.SrcObjPath, err.Error()) + } +} + +func transferObj(ctx context.Context, tempDir, dstDirPath string, deletePolicy DeletePolicy) error { + srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(tempDir) + if err != nil { + return errors.WithMessage(err, "failed get src storage") + } + dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath) + if err != nil { + return errors.WithMessage(err, "failed get dst storage") + } + objs, err := op.List(ctx, srcStorage, srcObjActualPath, model.ListArgs{}) + if err != nil { + return errors.WithMessagef(err, "failed list src [%s] objs", tempDir) + } + taskCreator, _ := 
ctx.Value("user").(*model.User) // taskCreator is nil when convert failed + for _, obj := range objs { + t := &TransferTask{ + TaskExtension: task.TaskExtension{ + Creator: taskCreator, + }, + SrcObjPath: stdpath.Join(srcObjActualPath, obj.GetName()), + DstDirPath: dstDirActualPath, + SrcStorage: srcStorage, + DstStorage: dstStorage, + SrcStorageMp: srcStorage.GetStorage().MountPath, + DstStorageMp: dstStorage.GetStorage().MountPath, + DeletePolicy: deletePolicy, + } + TransferTaskManager.Add(t) + } + return nil +} + +func transferObjPath(t *TransferTask) error { + t.Status = "getting src object" + srcObj, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath) + if err != nil { + return errors.WithMessagef(err, "failed get src [%s] file", t.SrcObjPath) + } + if srcObj.IsDir() { + t.Status = "src object is dir, listing objs" + objs, err := op.List(t.Ctx(), t.SrcStorage, t.SrcObjPath, model.ListArgs{}) + if err != nil { + return errors.WithMessagef(err, "failed list src [%s] objs", t.SrcObjPath) + } + for _, obj := range objs { + if utils.IsCanceled(t.Ctx()) { + return nil + } + srcObjPath := stdpath.Join(t.SrcObjPath, obj.GetName()) + dstObjPath := stdpath.Join(t.DstDirPath, srcObj.GetName()) + TransferTaskManager.Add(&TransferTask{ + TaskExtension: task.TaskExtension{ + Creator: t.Creator, + }, + SrcObjPath: srcObjPath, + DstDirPath: dstObjPath, + SrcStorage: t.SrcStorage, + DstStorage: t.DstStorage, + SrcStorageMp: t.SrcStorageMp, + DstStorageMp: t.DstStorageMp, + DeletePolicy: t.DeletePolicy, + }) + } + t.Status = "src object is dir, added all transfer tasks of objs" + return nil + } + return transferObjFile(t) +} + +func transferObjFile(t *TransferTask) error { + srcFile, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath) + if err != nil { + return errors.WithMessagef(err, "failed get src [%s] file", t.SrcObjPath) + } + link, _, err := op.Link(t.Ctx(), t.SrcStorage, t.SrcObjPath, model.LinkArgs{ + Header: http.Header{}, + }) + if err != nil { + return errors.WithMessagef(err, "failed get [%s] link", t.SrcObjPath) + } + fs := stream.FileStream{ + Obj: srcFile, + Ctx: t.Ctx(), + } + // any link provided is seekable + ss, err := stream.NewSeekableStream(fs, link) + if err != nil { + return errors.WithMessagef(err, "failed get [%s] stream", t.SrcObjPath) + } + t.SetTotalBytes(srcFile.GetSize()) + return op.Put(t.Ctx(), t.DstStorage, t.DstDirPath, ss, t.SetProgress) +} + +func removeObjTemp(t *TransferTask) { + srcObj, err := op.Get(t.Ctx(), t.SrcStorage, t.SrcObjPath) + if err != nil || srcObj.IsDir() { + return + } + if err := op.Remove(t.Ctx(), t.SrcStorage, t.SrcObjPath); err != nil { + log.Errorf("failed to delete temp obj %s, error: %s", t.SrcObjPath, err.Error()) + } +} diff --git a/internal/offline_download/tool/util.go b/internal/offline_download/tool/util.go deleted file mode 100644 index b2c6ec02..00000000 --- a/internal/offline_download/tool/util.go +++ /dev/null @@ -1,41 +0,0 @@ -package tool - -import ( - "os" - "path/filepath" -) - -func GetFiles(dir string) ([]File, error) { - var files []File - err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - files = append(files, File{ - Name: info.Name(), - Size: info.Size(), - Path: path, - Modified: info.ModTime(), - }) - } - return nil - }) - if err != nil { - return nil, err - } - return files, nil -} - -func GetFile(path string) (File, error) { - info, err := os.Stat(path) - if err != nil { - return File{}, err - } - return File{ - Name: info.Name(), 
- Size: info.Size(), - Path: path, - Modified: info.ModTime(), - }, nil -} diff --git a/internal/offline_download/transmission/client.go b/internal/offline_download/transmission/client.go new file mode 100644 index 00000000..ae136009 --- /dev/null +++ b/internal/offline_download/transmission/client.go @@ -0,0 +1,177 @@ +package transmission + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strconv" + + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/offline_download/tool" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/hekmon/transmissionrpc/v3" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +type Transmission struct { + client *transmissionrpc.Client +} + +func (t *Transmission) Run(task *tool.DownloadTask) error { + return errs.NotSupport +} + +func (t *Transmission) Name() string { + return "Transmission" +} + +func (t *Transmission) Items() []model.SettingItem { + // transmission settings + return []model.SettingItem{ + {Key: conf.TransmissionUri, Value: "http://localhost:9091/transmission/rpc", Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE}, + {Key: conf.TransmissionSeedtime, Value: "0", Type: conf.TypeNumber, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE}, + } +} + +func (t *Transmission) Init() (string, error) { + t.client = nil + uri := setting.GetStr(conf.TransmissionUri) + endpoint, err := url.Parse(uri) + if err != nil { + return "", errors.Wrap(err, "failed to init transmission client") + } + c, err := transmissionrpc.New(endpoint, nil) + if err != nil { + return "", errors.Wrap(err, "failed to init transmission client") + } + + ok, serverVersion, serverMinimumVersion, err := c.RPCVersion(context.Background()) + if err != nil { + return "", errors.Wrapf(err, "failed get transmission version") + } + + if !ok { + return "", fmt.Errorf("remote transmission RPC version (v%d) is incompatible with the transmission library (v%d): remote needs at least v%d", + serverVersion, transmissionrpc.RPCVersion, serverMinimumVersion) + } + + t.client = c + log.Infof("remote transmission RPC version (v%d) is compatible with our transmissionrpc library (v%d)\n", + serverVersion, transmissionrpc.RPCVersion) + log.Infof("using transmission version: %d", serverVersion) + return fmt.Sprintf("transmission version: %d", serverVersion), nil +} + +func (t *Transmission) IsReady() bool { + return t.client != nil +} + +func (t *Transmission) AddURL(args *tool.AddUrlArgs) (string, error) { + endpoint, err := url.Parse(args.Url) + if err != nil { + return "", errors.Wrap(err, "failed to parse transmission uri") + } + + rpcPayload := transmissionrpc.TorrentAddPayload{ + DownloadDir: &args.TempDir, + } + // http url for .torrent file + if endpoint.Scheme == "http" || endpoint.Scheme == "https" { + resp, err := http.Get(args.Url) + if err != nil { + return "", errors.Wrap(err, "failed to get .torrent file") + } + defer resp.Body.Close() + buffer := new(bytes.Buffer) + encoder := base64.NewEncoder(base64.StdEncoding, buffer) + // Stream file to the encoder + if _, err = utils.CopyWithBuffer(encoder, resp.Body); err != nil { + return "", errors.Wrap(err, "can't copy file content into the base64 encoder") + } + // Flush last bytes + if err = encoder.Close(); err != nil { + return "", errors.Wrap(err, "can't flush last bytes of the 
base64 encoder") + } + // Get the string form + b64 := buffer.String() + rpcPayload.MetaInfo = &b64 + } else { // magnet uri + rpcPayload.Filename = &args.Url + } + + torrent, err := t.client.TorrentAdd(context.TODO(), rpcPayload) + if err != nil { + return "", err + } + + if torrent.ID == nil { + return "", fmt.Errorf("failed get torrent ID") + } + gid := strconv.FormatInt(*torrent.ID, 10) + return gid, nil +} + +func (t *Transmission) Remove(task *tool.DownloadTask) error { + gid, err := strconv.ParseInt(task.GID, 10, 64) + if err != nil { + return err + } + err = t.client.TorrentRemove(context.TODO(), transmissionrpc.TorrentRemovePayload{ + IDs: []int64{gid}, + DeleteLocalData: false, + }) + return err +} + +func (t *Transmission) Status(task *tool.DownloadTask) (*tool.Status, error) { + gid, err := strconv.ParseInt(task.GID, 10, 64) + if err != nil { + return nil, err + } + infos, err := t.client.TorrentGetAllFor(context.TODO(), []int64{gid}) + if err != nil { + return nil, err + } + + if len(infos) < 1 { + return nil, fmt.Errorf("failed get status, wrong gid: %s", task.GID) + } + info := infos[0] + + s := &tool.Status{ + Completed: *info.IsFinished, + Err: err, + } + s.Progress = *info.PercentDone * 100 + s.TotalBytes = int64(*info.SizeWhenDone / 8) + + switch *info.Status { + case transmissionrpc.TorrentStatusCheckWait, + transmissionrpc.TorrentStatusDownloadWait, + transmissionrpc.TorrentStatusCheck, + transmissionrpc.TorrentStatusDownload, + transmissionrpc.TorrentStatusIsolated: + s.Status = "[transmission] " + info.Status.String() + case transmissionrpc.TorrentStatusSeedWait, + transmissionrpc.TorrentStatusSeed: + s.Completed = true + case transmissionrpc.TorrentStatusStopped: + s.Err = errors.Errorf("[transmission] failed to download %s, status: %s, error: %s", task.GID, info.Status.String(), *info.ErrorString) + default: + s.Err = errors.Errorf("[transmission] unknown status occurred downloading %s, err: %s", task.GID, *info.ErrorString) + } + return s, nil +} + +var _ tool.Tool = (*Transmission)(nil) + +func init() { + tool.Tools.Add(&Transmission{}) +} diff --git a/internal/op/archive.go b/internal/op/archive.go new file mode 100644 index 00000000..38b870c7 --- /dev/null +++ b/internal/op/archive.go @@ -0,0 +1,518 @@ +package op + +import ( + "context" + stderrors "errors" + "fmt" + "io" + stdpath "path" + "strings" + "time" + + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/stream" + + "github.com/Xhofe/go-cache" + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/singleflight" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +var archiveMetaCache = cache.NewMemCache(cache.WithShards[*model.ArchiveMetaProvider](64)) +var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider] + +func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + path = utils.FixAndCleanPath(path) + key := Key(storage, path) + if !args.Refresh { + if meta, ok := archiveMetaCache.Get(key); ok { + log.Debugf("use cache when get %s archive meta", path) + return meta, nil + } + } + fn := func() (*model.ArchiveMetaProvider, 
error) { + _, m, err := getArchiveMeta(ctx, storage, path, args) + if err != nil { + return nil, errors.Wrapf(err, "failed to get %s archive meta: %+v", path, err) + } + if m.Expiration != nil { + archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](*m.Expiration)) + } + return m, nil + } + if storage.Config().OnlyLocal { + meta, err := fn() + return meta, err + } + meta, err, _ := archiveMetaG.Do(key, fn) + return meta, err +} + +func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, []*stream.SeekableStream, error) { + l, obj, err := Link(ctx, storage, path, args) + if err != nil { + return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path) + } + baseName, ext, found := strings.Cut(obj.GetName(), ".") + if !found { + if l.MFile != nil { + _ = l.MFile.Close() + } + if l.RangeReadCloser != nil { + _ = l.RangeReadCloser.Close() + } + return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.") + } + partExt, t, err := tool.GetArchiveTool("." + ext) + if err != nil { + var e error + partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName())) + if e != nil { + if l.MFile != nil { + _ = l.MFile.Close() + } + if l.RangeReadCloser != nil { + _ = l.RangeReadCloser.Close() + } + return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext) + } + } + ss, err := stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: obj}, l) + if err != nil { + if l.MFile != nil { + _ = l.MFile.Close() + } + if l.RangeReadCloser != nil { + _ = l.RangeReadCloser.Close() + } + return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path) + } + ret := []*stream.SeekableStream{ss} + if partExt == nil { + return obj, t, ret, nil + } else { + index := partExt.SecondPartIndex + dir := stdpath.Dir(path) + for { + p := stdpath.Join(dir, baseName+fmt.Sprintf(partExt.PartFileFormat, index)) + var o model.Obj + l, o, err = Link(ctx, storage, p, args) + if err != nil { + break + } + ss, err = stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: o}, l) + if err != nil { + if l.MFile != nil { + _ = l.MFile.Close() + } + if l.RangeReadCloser != nil { + _ = l.RangeReadCloser.Close() + } + for _, s := range ret { + _ = s.Close() + } + return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path) + } + ret = append(ret, ss) + index++ + } + return obj, t, ret, nil + } +}
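GetArchiveToolAndStream has to release the link's reader on every early return, so the MFile/RangeReadCloser nil-check pair appears four times above. If this grows further, a small helper could centralize the cleanup; a sketch against the Link fields used here (the helper name is mine, not part of the PR):

package op

import "github.com/alist-org/alist/v3/internal/model"

// closeLink releases whichever reader a link carries; both fields are
// optional, matching the nil checks in GetArchiveToolAndStream.
func closeLink(l *model.Link) {
	if l == nil {
		return
	}
	if l.MFile != nil {
		_ = l.MFile.Close()
	}
	if l.RangeReadCloser != nil {
		_ = l.RangeReadCloser.Close()
	}
}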
nil, err + } + defer func() { + var e error + for _, s := range ss { + e = stderrors.Join(e, s.Close()) + } + if e != nil { + log.Errorf("failed to close file streamer, %v", e) + } + }() + meta, err := t.GetMeta(ss, args.ArchiveArgs) + if err != nil { + return nil, nil, err + } + archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: false} + if meta.GetTree() != nil { + archiveMetaProvider.Sort = &storage.GetStorage().Sort + } + if !storage.Config().NoCache { + Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration) + archiveMetaProvider.Expiration = &Expiration + } else if ss[0].Link.MFile == nil { + // alias、crypt 驱动 + archiveMetaProvider.Expiration = ss[0].Link.Expiration + } + return obj, archiveMetaProvider, err +} + +var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64)) +var archiveListG singleflight.Group[[]model.Obj] + +func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + path = utils.FixAndCleanPath(path) + metaKey := Key(storage, path) + key := stdpath.Join(metaKey, args.InnerPath) + if !args.Refresh { + if files, ok := archiveListCache.Get(key); ok { + log.Debugf("use cache when list archive [%s]%s", path, args.InnerPath) + return files, nil + } + // if meta, ok := archiveMetaCache.Get(metaKey); ok { + // log.Debugf("use meta cache when list archive [%s]%s", path, args.InnerPath) + // return getChildrenFromArchiveMeta(meta, args.InnerPath) + // } + } + objs, err, _ := archiveListG.Do(key, func() ([]model.Obj, error) { + obj, files, err := listArchive(ctx, storage, path, args) + if err != nil { + return nil, errors.Wrapf(err, "failed to list archive [%s]%s: %+v", path, args.InnerPath, err) + } + // set path + for _, f := range files { + if s, ok := f.(model.SetPath); ok && f.GetPath() == "" && obj.GetPath() != "" { + s.SetPath(stdpath.Join(obj.GetPath(), args.InnerPath, f.GetName())) + } + } + // warp obj name + model.WrapObjsName(files) + // sort objs + if storage.Config().LocalSort { + model.SortFiles(files, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection) + } + model.ExtractFolder(files, storage.GetStorage().ExtractFolder) + if !storage.Config().NoCache { + if len(files) > 0 { + log.Debugf("set cache: %s => %+v", key, files) + archiveListCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) + } else { + log.Debugf("del cache: %s", key) + archiveListCache.Del(key) + } + } + return files, nil + }) + return objs, err +} + +func _listArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, []model.Obj, error) { + storageAr, ok := storage.(driver.ArchiveReader) + if ok { + obj, err := GetUnwrap(ctx, storage, path) + if err != nil { + return nil, nil, errors.WithMessage(err, "failed to get file") + } + if obj.IsDir() { + return nil, nil, errors.WithStack(errs.NotFile) + } + files, err := storageAr.ListArchive(ctx, obj, args.ArchiveInnerArgs) + if !errors.Is(err, errs.NotImplement) { + return obj, files, err + } + } + obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs) + if err != nil { + return nil, nil, err + } + defer func() { + var e error + for _, s := range ss { + e = stderrors.Join(e, s.Close()) + } + if e != nil { + 
log.Errorf("failed to close file streamer, %v", e) + } + }() + files, err := t.List(ss, args.ArchiveInnerArgs) + return obj, files, err +} + +func listArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, []model.Obj, error) { + obj, files, err := _listArchive(ctx, storage, path, args) + if errors.Is(err, errs.NotSupport) { + var meta model.ArchiveMeta + meta, err = GetArchiveMeta(ctx, storage, path, model.ArchiveMetaArgs{ + ArchiveArgs: args.ArchiveArgs, + Refresh: args.Refresh, + }) + if err != nil { + return nil, nil, err + } + files, err = getChildrenFromArchiveMeta(meta, args.InnerPath) + if err != nil { + return nil, nil, err + } + } + if err == nil && obj == nil { + obj, err = GetUnwrap(ctx, storage, path) + } + if err != nil { + return nil, nil, err + } + return obj, files, err +} + +func getChildrenFromArchiveMeta(meta model.ArchiveMeta, innerPath string) ([]model.Obj, error) { + obj := meta.GetTree() + if obj == nil { + return nil, errors.WithStack(errs.NotImplement) + } + dirs := splitPath(innerPath) + for _, dir := range dirs { + var next model.ObjTree + for _, c := range obj { + if c.GetName() == dir { + next = c + break + } + } + if next == nil { + return nil, errors.WithStack(errs.ObjectNotFound) + } + if !next.IsDir() || next.GetChildren() == nil { + return nil, errors.WithStack(errs.NotFolder) + } + obj = next.GetChildren() + } + return utils.SliceConvert(obj, func(src model.ObjTree) (model.Obj, error) { + return src, nil + }) +} + +func splitPath(path string) []string { + var parts []string + for { + dir, file := stdpath.Split(path) + if file == "" { + break + } + parts = append([]string{file}, parts...) + path = strings.TrimSuffix(dir, "/") + } + return parts +} + +func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, model.Obj, error) { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + path = utils.FixAndCleanPath(path) + af, err := GetUnwrap(ctx, storage, path) + if err != nil { + return nil, nil, errors.WithMessage(err, "failed to get file") + } + if af.IsDir() { + return nil, nil, errors.WithStack(errs.NotFile) + } + if g, ok := storage.(driver.ArchiveGetter); ok { + obj, err := g.ArchiveGet(ctx, af, args.ArchiveInnerArgs) + if err == nil { + return af, model.WrapObjName(obj), nil + } + } + + if utils.PathEqual(args.InnerPath, "/") { + return af, &model.ObjWrapName{ + Name: RootName, + Obj: &model.Object{ + Name: af.GetName(), + Path: af.GetPath(), + ID: af.GetID(), + Size: af.GetSize(), + Modified: af.ModTime(), + IsFolder: true, + }, + }, nil + } + + innerDir, name := stdpath.Split(args.InnerPath) + args.InnerPath = strings.TrimSuffix(innerDir, "/") + files, err := ListArchive(ctx, storage, path, args) + if err != nil { + return nil, nil, errors.WithMessage(err, "failed get parent list") + } + for _, f := range files { + if f.GetName() == name { + return af, f, nil + } + } + return nil, nil, errors.WithStack(errs.ObjectNotFound) +} + +type extractLink struct { + Link *model.Link + Obj model.Obj +} + +var extractCache = cache.NewMemCache(cache.WithShards[*extractLink](16)) +var extractG singleflight.Group[*extractLink] + +func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return 
nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + key := stdpath.Join(Key(storage, path), args.InnerPath) + if link, ok := extractCache.Get(key); ok { + return link.Link, link.Obj, nil + } else if link, ok := extractCache.Get(key + ":" + args.IP); ok { + return link.Link, link.Obj, nil + } + fn := func() (*extractLink, error) { + link, err := driverExtract(ctx, storage, path, args) + if err != nil { + return nil, errors.Wrapf(err, "failed extract archive") + } + if link.Link.Expiration != nil { + if link.Link.IPCacheKey { + key = key + ":" + args.IP + } + extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration)) + } + return link, nil + } + if storage.Config().OnlyLocal { + link, err := fn() + if err != nil { + return nil, nil, err + } + return link.Link, link.Obj, nil + } + link, err, _ := extractG.Do(key, fn) + if err != nil { + return nil, nil, err + } + return link.Link, link.Obj, err +} + +func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*extractLink, error) { + storageAr, ok := storage.(driver.ArchiveReader) + if !ok { + return nil, errs.DriverExtractNotSupported + } + archiveFile, extracted, err := ArchiveGet(ctx, storage, path, model.ArchiveListArgs{ + ArchiveInnerArgs: args, + Refresh: false, + }) + if err != nil { + return nil, errors.WithMessage(err, "failed to get file") + } + if extracted.IsDir() { + return nil, errors.WithStack(errs.NotFile) + } + link, err := storageAr.Extract(ctx, archiveFile, args) + return &extractLink{Link: link, Obj: extracted}, err +} + +type streamWithParent struct { + rc io.ReadCloser + parents []*stream.SeekableStream +} + +func (s *streamWithParent) Read(p []byte) (int, error) { + return s.rc.Read(p) +} + +func (s *streamWithParent) Close() error { + err := s.rc.Close() + for _, ss := range s.parents { + err = stderrors.Join(err, ss.Close()) + } + return err +} + +func InternalExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) { + _, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs) + if err != nil { + return nil, 0, err + } + rc, size, err := t.Extract(ss, args) + if err != nil { + var e error + for _, s := range ss { + e = stderrors.Join(e, s.Close()) + } + if e != nil { + log.Errorf("failed to close file streamer, %v", e) + err = stderrors.Join(err, e) + } + return nil, 0, err + } + return &streamWithParent{rc: rc, parents: ss}, size, nil +} + +func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) error { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + srcPath = utils.FixAndCleanPath(srcPath) + dstDirPath = utils.FixAndCleanPath(dstDirPath) + srcObj, err := GetUnwrap(ctx, storage, srcPath) + if err != nil { + return errors.WithMessage(err, "failed to get src object") + } + dstDir, err := GetUnwrap(ctx, storage, dstDirPath) + if err != nil { + return errors.WithMessage(err, "failed to get dst dir") + } + + switch s := storage.(type) { + case driver.ArchiveDecompressResult: + var newObjs []model.Obj + newObjs, err = s.ArchiveDecompress(ctx, srcObj, dstDir, args) + if err == nil { + if newObjs != nil && len(newObjs) > 0 { + for _, newObj := range newObjs { + addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) + } + } else if 
!utils.IsBool(lazyCache...) { + ClearCache(storage, dstDirPath) + } + } + case driver.ArchiveDecompress: + err = s.ArchiveDecompress(ctx, srcObj, dstDir, args) + if err == nil && !utils.IsBool(lazyCache...) { + ClearCache(storage, dstDirPath) + } + default: + return errs.NotImplement + } + return errors.WithStack(err) +} diff --git a/internal/op/driver.go b/internal/op/driver.go index 4f10e8e2..41b6f6d4 100644 --- a/internal/op/driver.go +++ b/internal/op/driver.go @@ -133,6 +133,12 @@ func getMainItems(config driver.Config) []driver.Item { Type: conf.TypeSelect, Options: "front,back", }) + items = append(items, driver.Item{ + Name: "disable_index", + Type: conf.TypeBool, + Default: "false", + Required: true, + }) items = append(items, driver.Item{ Name: "enable_sign", Type: conf.TypeBool, diff --git a/internal/op/fs.go b/internal/op/fs.go index e49c941a..64e99335 100644 --- a/internal/op/fs.go +++ b/internal/op/fs.go @@ -3,12 +3,14 @@ package op import ( "context" stdpath "path" + "slices" "time" "github.com/Xhofe/go-cache" "github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/generic_sync" "github.com/alist-org/alist/v3/pkg/singleflight" "github.com/alist-org/alist/v3/pkg/utils" @@ -25,6 +27,12 @@ func updateCacheObj(storage driver.Driver, path string, oldObj model.Obj, newObj key := Key(storage, path) objs, ok := listCache.Get(key) if ok { + for i, obj := range objs { + if obj.GetName() == newObj.GetName() { + objs = slices.Delete(objs, i, i+1) + break + } + } for i, obj := range objs { if obj.GetName() == oldObj.GetName() { objs[i] = newObj @@ -510,6 +518,12 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod log.Errorf("failed to close file streamer, %v", err) } }() + // UrlTree PUT + if storage.GetStorage().Driver == "UrlTree" { + var link string + dstDirPath, link = urlTreeSplitLineFormPath(stdpath.Join(dstDirPath, file.GetName())) + file = &stream.FileStream{Obj: &model.Object{Name: link}} + } // if file exist and size = 0, delete it dstDirPath = utils.FixAndCleanPath(dstDirPath) dstPath := stdpath.Join(dstDirPath, file.GetName()) @@ -586,3 +600,43 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod } return errors.WithStack(err) } + +func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url string, lazyCache ...bool) error { + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return errors.Errorf("storage not init: %s", storage.GetStorage().Status) + } + dstDirPath = utils.FixAndCleanPath(dstDirPath) + _, err := GetUnwrap(ctx, storage, stdpath.Join(dstDirPath, dstName)) + if err == nil { + return errors.New("obj already exists") + } + err = MakeDir(ctx, storage, dstDirPath) + if err != nil { + return errors.WithMessagef(err, "failed to put url") + } + dstDir, err := GetUnwrap(ctx, storage, dstDirPath) + if err != nil { + return errors.WithMessagef(err, "failed to put url") + } + switch s := storage.(type) { + case driver.PutURLResult: + var newObj model.Obj + newObj, err = s.PutURL(ctx, dstDir, dstName, url) + if err == nil { + if newObj != nil { + addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) + } else if !utils.IsBool(lazyCache...) { + ClearCache(storage, dstDirPath) + } + } + case driver.PutURL: + err = s.PutURL(ctx, dstDir, dstName, url) + if err == nil && !utils.IsBool(lazyCache...) 
{ + ClearCache(storage, dstDirPath) + } + default: + return errs.NotImplement + } + log.Debugf("put url [%s](%s) done", dstName, url) + return errors.WithStack(err) +} diff --git a/internal/op/path.go index 27f7e183..912a0000 100644 --- a/internal/op/path.go +++ b/internal/op/path.go @@ -2,6 +2,7 @@ package op import ( "github.com/alist-org/alist/v3/internal/errs" + stdpath "path" "strings" "github.com/alist-org/alist/v3/internal/driver" @@ -27,3 +28,30 @@ func GetStorageAndActualPath(rawPath string) (storage driver.Driver, actualPath actualPath = utils.FixAndCleanPath(strings.TrimPrefix(rawPath, mountPath)) return } + +// urlTreeSplitLineFormPath splits a path into the real parent directory and the UrlTree definition string +func urlTreeSplitLineFormPath(path string) (pp string, file string) { + // url.PathUnescape collapses "//" into "/", so add the second slash back manually + path = strings.Replace(path, "https:/", "https://", 1) + path = strings.Replace(path, "http:/", "http://", 1) + if strings.Contains(path, ":https:/") || strings.Contains(path, ":http:/") { + // URL-Tree form: /url_tree_driver/file_name[:size[:time]]:https://example.com/file + fPath := strings.SplitN(path, ":", 2)[0] + pp, _ = stdpath.Split(fPath) + file = path[len(pp):] + } else if strings.Contains(path, "/https:/") || strings.Contains(path, "/http:/") { + // URL-Tree form: /url_tree_driver/https://example.com/file + index := strings.Index(path, "/http://") + if index == -1 { + index = strings.Index(path, "/https://") + } + pp = path[:index] + file = path[index+1:] + } else { + pp, file = stdpath.Split(path) + } + if pp == "" { + pp = "/" + } + return +}
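urlTreeSplitLineFormPath is easiest to follow with concrete inputs; note that it expects the collapsed "https:/" form produced by url.PathUnescape and restores the double slash itself. A test-style sketch in package op (inputs and the mount path are illustrative):

package op

import "testing"

func TestUrlTreeSplitLineFormPath(t *testing.T) {
	// name[:size[:time]]:URL form: pp is the parent of the name segment
	pp, file := urlTreeSplitLineFormPath("/mnt/url/file.zip:1024:https:/example.com/a.zip")
	if pp != "/mnt/url/" || file != "file.zip:1024:https://example.com/a.zip" {
		t.Fatalf("got pp=%q file=%q", pp, file)
	}
	// bare URL form: everything before the embedded URL is the parent path
	pp, file = urlTreeSplitLineFormPath("/mnt/url/https:/example.com/a.zip")
	if pp != "/mnt/url" || file != "https://example.com/a.zip" {
		t.Fatalf("got pp=%q file=%q", pp, file)
	}
}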
diff --git a/internal/op/setting.go index 83d19c12..36a792b0 100644 --- a/internal/op/setting.go +++ b/internal/op/setting.go @@ -26,9 +26,18 @@ var settingGroupCacheF = func(key string, item []model.SettingItem) { settingGroupCache.Set(key, item, cache.WithEx[[]model.SettingItem](time.Hour)) } -func settingCacheUpdate() { +var settingChangingCallbacks = make([]func(), 0) + +func RegisterSettingChangingCallback(f func()) { + settingChangingCallbacks = append(settingChangingCallbacks, f) +} + +func SettingCacheUpdate() { settingCache.Clear() settingGroupCache.Clear() + for _, cb := range settingChangingCallbacks { + cb() + } } func GetPublicSettingsMap() map[string]string { @@ -167,7 +176,7 @@ func SaveSettingItems(items []model.SettingItem) error { } } if len(errs) < len(items)-len(noHookItems)+1 { - settingCacheUpdate() + SettingCacheUpdate() } return utils.MergeErrors(errs...) } @@ -181,7 +190,7 @@ func SaveSettingItem(item *model.SettingItem) (err error) { if err = db.SaveSettingItem(item); err != nil { return err } - settingCacheUpdate() + SettingCacheUpdate() return nil } @@ -193,6 +202,6 @@ func DeleteSettingItemByKey(key string) error { if !old.IsDeprecated() { return errors.Errorf("setting [%s] is not deprecated", key) } - settingCacheUpdate() + SettingCacheUpdate() return db.DeleteSettingItemByKey(key) } diff --git a/internal/op/sshkey.go b/internal/op/sshkey.go new file mode 100644 index 00000000..139698e6 --- /dev/null +++ b/internal/op/sshkey.go @@ -0,0 +1,47 @@ +package op + +import ( + "github.com/alist-org/alist/v3/internal/db" + "github.com/alist-org/alist/v3/internal/model" + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" + "time" +) + +func CreateSSHPublicKey(k *model.SSHPublicKey) (error, bool) { + _, err := db.GetSSHPublicKeyByUserTitle(k.UserId, k.Title) + if err == nil { + return errors.New("key with the same title already exists"), true + } + pubKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k.KeyStr)) + if err != nil { + return err, false + } + k.Fingerprint = ssh.FingerprintSHA256(pubKey) + k.AddedTime = time.Now() + k.LastUsedTime = k.AddedTime + return db.CreateSSHPublicKey(k), true +} + +func GetSSHPublicKeyByUserId(userId uint, pageIndex, pageSize int) (keys []model.SSHPublicKey, count int64, err error) { + return db.GetSSHPublicKeyByUserId(userId, pageIndex, pageSize) +} + +func GetSSHPublicKeyByIdAndUserId(id uint, userId uint) (*model.SSHPublicKey, error) { + key, err := db.GetSSHPublicKeyById(id) + if err != nil { + return nil, err + } + if key.UserId != userId { + return nil, errors.New("failed get old key") + } + return key, nil +} + +func UpdateSSHPublicKey(k *model.SSHPublicKey) error { + return db.UpdateSSHPublicKey(k) +} + +func DeleteSSHPublicKeyById(keyId uint) error { + return db.DeleteSSHPublicKeyById(keyId) +} diff --git a/internal/op/storage.go b/internal/op/storage.go index 6790a8df..f957f95b 100644 --- a/internal/op/storage.go +++ b/internal/op/storage.go @@ -10,6 +10,7 @@ import ( "github.com/alist-org/alist/v3/internal/db" "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/generic_sync" "github.com/alist-org/alist/v3/pkg/utils" @@ -101,11 +102,34 @@ func initStorage(ctx context.Context, storage model.Storage, storageDriver drive log.Errorf("panic init storage: %s", errInfo) driverStorage.SetStatus(errInfo) MustSaveDriverStorage(storageDriver) - storagesMap.Delete(driverStorage.MountPath) + storagesMap.Store(driverStorage.MountPath, storageDriver) } }() // Unmarshal Addition err = utils.Json.UnmarshalFromString(driverStorage.Addition, storageDriver.GetAddition()) + if err == nil { + if ref, ok := storageDriver.(driver.Reference); ok { + if strings.HasPrefix(driverStorage.Remark, "ref:/") { + refMountPath := driverStorage.Remark + i := strings.Index(refMountPath, "\n") + if i > 0 { + refMountPath = refMountPath[4:i] + } else { + refMountPath = refMountPath[4:] + } + var refStorage driver.Driver + refStorage, err = GetStorageByMountPath(refMountPath) + if err != nil { + err = fmt.Errorf("ref: %w", err) + } else { + err = ref.InitReference(refStorage) + if err != nil && errs.IsNotSupportError(err) { + err = fmt.Errorf("ref: storage is not %s", storageDriver.Config().Name) + } + } + } + } + } if err == nil { err = storageDriver.Init(ctx) } diff --git a/internal/search/build.go
b/internal/search/build.go index 9865b298..2888c1f4 100644 --- a/internal/search/build.go +++ b/internal/search/build.go @@ -157,6 +157,11 @@ func BuildIndex(ctx context.Context, indexPaths, ignorePaths []string, maxDepth return filepath.SkipDir } } + if storage, _, err := op.GetStorageAndActualPath(indexPath); err == nil { + if storage.GetStorage().DisableIndex { + return filepath.SkipDir + } + } // ignore root if indexPath == "/" { return nil diff --git a/internal/search/util.go b/internal/search/util.go index 8d03b740..2e6ac8da 100644 --- a/internal/search/util.go +++ b/internal/search/util.go @@ -38,7 +38,7 @@ func WriteProgress(progress *model.IndexProgress) { } } -func updateIgnorePaths() { +func updateIgnorePaths(customIgnorePaths string) { storages := op.GetAllStorages() ignorePaths := make([]string, 0) var skipDrivers = []string{"AList V2", "AList V3", "Virtual"} @@ -66,7 +66,6 @@ func updateIgnorePaths() { } } } - customIgnorePaths := setting.GetStr(conf.IgnorePaths) if customIgnorePaths != "" { ignorePaths = append(ignorePaths, strings.Split(customIgnorePaths, "\n")...) } @@ -84,13 +83,13 @@ func isIgnorePath(path string) bool { func init() { op.RegisterSettingItemHook(conf.IgnorePaths, func(item *model.SettingItem) error { - updateIgnorePaths() + updateIgnorePaths(item.Value) return nil }) op.RegisterStorageHook(func(typ string, storage driver.Driver) { var skipDrivers = []string{"AList V2", "AList V3", "Virtual"} if utils.SliceContains(skipDrivers, storage.Config().Name) { - updateIgnorePaths() + updateIgnorePaths(setting.GetStr(conf.IgnorePaths)) } }) } diff --git a/internal/sign/archive.go b/internal/sign/archive.go new file mode 100644 index 00000000..26a2c208 --- /dev/null +++ b/internal/sign/archive.go @@ -0,0 +1,41 @@ +package sign + +import ( + "sync" + "time" + + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/pkg/sign" +) + +var onceArchive sync.Once +var instanceArchive sign.Sign + +func SignArchive(data string) string { + expire := setting.GetInt(conf.LinkExpiration, 0) + if expire == 0 { + return NotExpiredArchive(data) + } else { + return WithDurationArchive(data, time.Duration(expire)*time.Hour) + } +} + +func WithDurationArchive(data string, d time.Duration) string { + onceArchive.Do(InstanceArchive) + return instanceArchive.Sign(data, time.Now().Add(d).Unix()) +} + +func NotExpiredArchive(data string) string { + onceArchive.Do(InstanceArchive) + return instanceArchive.Sign(data, 0) +} + +func VerifyArchive(data string, sign string) error { + onceArchive.Do(InstanceArchive) + return instanceArchive.Verify(data, sign) +} + +func InstanceArchive() { + instanceArchive = sign.NewHMACSign([]byte(setting.GetStr(conf.Token) + "-archive")) +} diff --git a/internal/stream/limit.go b/internal/stream/limit.go new file mode 100644 index 00000000..14d0efd0 --- /dev/null +++ b/internal/stream/limit.go @@ -0,0 +1,152 @@ +package stream + +import ( + "context" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/http_range" + "github.com/alist-org/alist/v3/pkg/utils" + "golang.org/x/time/rate" + "io" + "time" +) + +type Limiter interface { + Limit() rate.Limit + Burst() int + TokensAt(time.Time) float64 + Tokens() float64 + Allow() bool + AllowN(time.Time, int) bool + Reserve() *rate.Reservation + ReserveN(time.Time, int) *rate.Reservation + Wait(context.Context) error + WaitN(context.Context, int) error + SetLimit(rate.Limit) + SetLimitAt(time.Time, rate.Limit) + 
SetBurst(int) + SetBurstAt(time.Time, int) +} + +var ( + ClientDownloadLimit Limiter + ClientUploadLimit Limiter + ServerDownloadLimit Limiter + ServerUploadLimit Limiter +) + +type RateLimitReader struct { + io.Reader + Limiter Limiter + Ctx context.Context +} + +func (r *RateLimitReader) Read(p []byte) (n int, err error) { + if r.Ctx != nil && utils.IsCanceled(r.Ctx) { + return 0, r.Ctx.Err() + } + n, err = r.Reader.Read(p) + if err != nil { + return + } + if r.Limiter != nil { + if r.Ctx == nil { + r.Ctx = context.Background() + } + err = r.Limiter.WaitN(r.Ctx, n) + } + return +} + +func (r *RateLimitReader) Close() error { + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +type RateLimitWriter struct { + io.Writer + Limiter Limiter + Ctx context.Context +} + +func (w *RateLimitWriter) Write(p []byte) (n int, err error) { + if w.Ctx != nil && utils.IsCanceled(w.Ctx) { + return 0, w.Ctx.Err() + } + n, err = w.Writer.Write(p) + if err != nil { + return + } + if w.Limiter != nil { + if w.Ctx == nil { + w.Ctx = context.Background() + } + err = w.Limiter.WaitN(w.Ctx, n) + } + return +} + +func (w *RateLimitWriter) Close() error { + if c, ok := w.Writer.(io.Closer); ok { + return c.Close() + } + return nil +} + +type RateLimitFile struct { + model.File + Limiter Limiter + Ctx context.Context +} + +func (r *RateLimitFile) Read(p []byte) (n int, err error) { + if r.Ctx != nil && utils.IsCanceled(r.Ctx) { + return 0, r.Ctx.Err() + } + n, err = r.File.Read(p) + if err != nil { + return + } + if r.Limiter != nil { + if r.Ctx == nil { + r.Ctx = context.Background() + } + err = r.Limiter.WaitN(r.Ctx, n) + } + return +} + +func (r *RateLimitFile) ReadAt(p []byte, off int64) (n int, err error) { + if r.Ctx != nil && utils.IsCanceled(r.Ctx) { + return 0, r.Ctx.Err() + } + n, err = r.File.ReadAt(p, off) + if err != nil { + return + } + if r.Limiter != nil { + if r.Ctx == nil { + r.Ctx = context.Background() + } + err = r.Limiter.WaitN(r.Ctx, n) + } + return +} + +type RateLimitRangeReadCloser struct { + model.RangeReadCloserIF + Limiter Limiter +} + +func (rrc *RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { + rc, err := rrc.RangeReadCloserIF.RangeRead(ctx, httpRange) + if err != nil { + return nil, err + } + return &RateLimitReader{ + Reader: rc, + Limiter: rrc.Limiter, + Ctx: ctx, + }, nil +} diff --git a/internal/stream/stream.go b/internal/stream/stream.go index 2c9543c1..64160915 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -6,12 +6,15 @@ import ( "errors" "fmt" "io" + "math" "os" "github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/utils" + "github.com/sirupsen/logrus" + "go4.org/readerutil" ) type FileStream struct { @@ -60,6 +63,8 @@ func (f *FileStream) Close() error { err2 = os.RemoveAll(f.tmpFile.Name()) if err2 != nil { err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", f.tmpFile.Name()) + } else { + f.tmpFile = nil } } @@ -89,7 +94,17 @@ func (f *FileStream) CacheFullInTempFile() (model.File, error) { f.Add(tmpF) f.tmpFile = tmpF f.Reader = tmpF - return f.tmpFile, nil + return tmpF, nil +} + +func (f *FileStream) GetFile() model.File { + if f.tmpFile != nil { + return f.tmpFile + } + if file, ok := f.Reader.(model.File); ok { + return file + } + return nil } const InMemoryBufMaxSize = 10 // Megabytes @@ -99,33 +114,39 @@ const 
InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024 // also support a peeking RangeRead at very start, but won't buffer more than 10MB data in memory func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { if httpRange.Length == -1 { - httpRange.Length = f.GetSize() + // see internal/net/request.go + httpRange.Length = f.GetSize() - httpRange.Start } - if f.peekBuff != nil && httpRange.Start < int64(f.peekBuff.Len()) && httpRange.Start+httpRange.Length-1 < int64(f.peekBuff.Len()) { + size := httpRange.Start + httpRange.Length + if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) { return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil } - if f.tmpFile == nil { - if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil { - bufSize := utils.Min(httpRange.Length, f.GetSize()) - newBuf := bytes.NewBuffer(make([]byte, 0, bufSize)) - n, err := utils.CopyWithBufferN(newBuf, f.Reader, bufSize) + var cache io.ReaderAt = f.GetFile() + if cache == nil { + if size <= InMemoryBufMaxSizeBytes { + bufSize := min(size, f.GetSize()) + // using bytes.Buffer as io.CopyBuffer's destination would invoke Buffer.ReadFrom, + // which grows the buffer even when the amount written equals Buffer.Cap, so read into a plain []byte instead + buf := make([]byte, bufSize) + n, err := io.ReadFull(f.Reader, buf) if err != nil { return nil, err } - if n != bufSize { + if n != int(bufSize) { return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect=%d, actual=%d", bufSize, n) } - f.peekBuff = bytes.NewReader(newBuf.Bytes()) + f.peekBuff = bytes.NewReader(buf) f.Reader = io.MultiReader(f.peekBuff, f.Reader) - return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil + cache = f.peekBuff } else { - _, err := f.CacheFullInTempFile() + var err error + cache, err = f.CacheFullInTempFile() if err != nil { return nil, err } } } - return io.NewSectionReader(f.tmpFile, httpRange.Start, httpRange.Length), nil + return io.NewSectionReader(cache, httpRange.Start, httpRange.Length), nil } var _ model.FileStreamer = (*SeekableStream)(nil) @@ -134,6 +155,10 @@ var _ model.FileStreamer = (*FileStream)(nil) //var _ seekableStream = (*FileStream)(nil) // for most internal stream, which is either RangeReadCloser or MFile +// Any functionality implemented based on SeekableStream should implement a Close method, +// whose only purpose is to close the SeekableStream object. If such functionality has +// additional resources that need to be closed, they should be added to the Closer property of +// the SeekableStream object and be closed together when the SeekableStream object is closed.
type SeekableStream struct { FileStream Link *model.Link @@ -146,37 +171,55 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) if len(fs.Mimetype) == 0 { fs.Mimetype = utils.GetMimeType(fs.Obj.GetName()) } - ss := SeekableStream{FileStream: fs, Link: link} + ss := &SeekableStream{FileStream: fs, Link: link} if ss.Reader != nil { result, ok := ss.Reader.(model.File) if ok { ss.mFile = result ss.Closers.Add(result) - return &ss, nil + return ss, nil } } if ss.Link != nil { if ss.Link.MFile != nil { - ss.mFile = ss.Link.MFile - ss.Reader = ss.Link.MFile - ss.Closers.Add(ss.Link.MFile) - return &ss, nil + mFile := ss.Link.MFile + if _, ok := mFile.(*os.File); !ok { + mFile = &RateLimitFile{ + File: mFile, + Limiter: ServerDownloadLimit, + Ctx: fs.Ctx, + } + } + ss.mFile = mFile + ss.Reader = mFile + ss.Closers.Add(mFile) + return ss, nil } - if ss.Link.RangeReadCloser != nil { - ss.rangeReadCloser = ss.Link.RangeReadCloser - return &ss, nil + ss.rangeReadCloser = &RateLimitRangeReadCloser{ + RangeReadCloserIF: ss.Link.RangeReadCloser, + Limiter: ServerDownloadLimit, + } + ss.Add(ss.rangeReadCloser) + return ss, nil } if len(ss.Link.URL) > 0 { rrc, err := GetRangeReadCloserFromLink(ss.GetSize(), link) if err != nil { return nil, err } + rrc = &RateLimitRangeReadCloser{ + RangeReadCloserIF: rrc, + Limiter: ServerDownloadLimit, + } ss.rangeReadCloser = rrc - return &ss, nil + ss.Add(rrc) + return ss, nil } } - + if fs.Reader != nil { + return ss, nil + } return nil, fmt.Errorf("illegal seekableStream") } @@ -187,7 +230,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) // RangeRead is not thread-safe, pls use it in single thread only. func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { if httpRange.Length == -1 { - httpRange.Length = ss.GetSize() + httpRange.Length = ss.GetSize() - httpRange.Start } if ss.mFile != nil { return io.NewSectionReader(ss.mFile, httpRange.Start, httpRange.Length), nil @@ -202,7 +245,7 @@ func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, erro } return rc, nil } - return nil, fmt.Errorf("can't find mFile or rangeReadCloser") + return ss.FileStream.RangeRead(httpRange) } //func (f *FileStream) GetReader() io.Reader { @@ -224,8 +267,6 @@ func (ss *SeekableStream) Read(p []byte) (n int, err error) { return 0, nil } ss.Reader = io.NopCloser(rc) - ss.Closers.Add(rc) - } return ss.Reader.Read(p) } @@ -244,10 +285,308 @@ func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) { ss.Add(tmpF) ss.tmpFile = tmpF ss.Reader = tmpF - return ss.tmpFile, nil + return tmpF, nil +} + +func (ss *SeekableStream) GetFile() model.File { + if ss.tmpFile != nil { + return ss.tmpFile + } + if ss.mFile != nil { + return ss.mFile + } + return nil } func (f *FileStream) SetTmpFile(r *os.File) { - f.Reader = r + f.Add(r) f.tmpFile = r + f.Reader = r +} + +type ReaderWithSize interface { + io.ReadCloser + GetSize() int64 +} + +type SimpleReaderWithSize struct { + io.Reader + Size int64 +} + +func (r *SimpleReaderWithSize) GetSize() int64 { + return r.Size +} + +func (r *SimpleReaderWithSize) Close() error { + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +type ReaderUpdatingProgress struct { + Reader ReaderWithSize + model.UpdateProgress + offset int +} + +func (r *ReaderUpdatingProgress) Read(p []byte) (n int, err error) { + n, err = r.Reader.Read(p) + r.offset += n + r.UpdateProgress(math.Min(100.0, 
float64(r.offset)/float64(r.Reader.GetSize())*100.0)) + return n, err +} + +func (r *ReaderUpdatingProgress) Close() error { + return r.Reader.Close() +} + +type SStreamReadAtSeeker interface { + model.File + GetRawStream() *SeekableStream +} + +type readerCur struct { + reader io.Reader + cur int64 +} + +type RangeReadReadAtSeeker struct { + ss *SeekableStream + masterOff int64 + readers []*readerCur + headCache *headCache +} + +type headCache struct { + *readerCur + bufs [][]byte +} + +func (c *headCache) read(p []byte) (n int, err error) { + pL := len(p) + logrus.Debugf("headCache read_%d", pL) + if c.cur < int64(pL) { + bufL := int64(pL) - c.cur + buf := make([]byte, bufL) + lr := io.LimitReader(c.reader, bufL) + off := 0 + for c.cur < int64(pL) { + n, err = lr.Read(buf[off:]) + off += n + c.cur += int64(n) + if err == io.EOF && off == int(bufL) { + err = nil + } + if err != nil { + break + } + } + c.bufs = append(c.bufs, buf) + } + n = 0 + if c.cur >= int64(pL) { + for i := 0; n < pL; i++ { + buf := c.bufs[i] + r := len(buf) + if n+r > pL { + r = pL - n + } + n += copy(p[n:], buf[:r]) + } + } + return +} +func (r *headCache) Close() error { + for i := range r.bufs { + r.bufs[i] = nil + } + r.bufs = nil + return nil +} + +func (r *RangeReadReadAtSeeker) InitHeadCache() { + if r.ss.Link.MFile == nil && r.masterOff == 0 { + reader := r.readers[0] + r.readers = r.readers[1:] + r.headCache = &headCache{readerCur: reader} + r.ss.Closers.Add(r.headCache) + } +} + +func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStreamReadAtSeeker, error) { + if ss.mFile != nil { + _, err := ss.mFile.Seek(offset, io.SeekStart) + if err != nil { + return nil, err + } + return &FileReadAtSeeker{ss: ss}, nil + } + r := &RangeReadReadAtSeeker{ + ss: ss, + masterOff: offset, + } + if offset != 0 || utils.IsBool(forceRange...) 
{
+		if offset < 0 || offset > ss.GetSize() {
+			return nil, errors.New("offset out of range")
+		}
+		_, err := r.getReaderAtOffset(offset)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		rc := &readerCur{reader: ss, cur: offset}
+		r.readers = append(r.readers, rc)
+	}
+	return r, nil
+}
+
+func NewMultiReaderAt(ss []*SeekableStream) (readerutil.SizeReaderAt, error) {
+	readers := make([]readerutil.SizeReaderAt, 0, len(ss))
+	for _, s := range ss {
+		ra, err := NewReadAtSeeker(s, 0)
+		if err != nil {
+			return nil, err
+		}
+		readers = append(readers, io.NewSectionReader(ra, 0, s.GetSize()))
+	}
+	return readerutil.NewMultiReaderAt(readers...), nil
+}
+
+func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream {
+	return r.ss
+}
+
+func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error) {
+	var rc *readerCur
+	for _, reader := range r.readers {
+		if reader.cur == -1 {
+			continue
+		}
+		if reader.cur == off {
+			// logrus.Debugf("getReaderAtOffset match_%d", off)
+			return reader, nil
+		}
+		if reader.cur > 0 && off >= reader.cur && (rc == nil || reader.cur < rc.cur) {
+			rc = reader
+		}
+	}
+	if rc != nil && off-rc.cur <= utils.MB {
+		n, err := utils.CopyWithBufferN(io.Discard, rc.reader, off-rc.cur)
+		rc.cur += n
+		if err == io.EOF && rc.cur == off {
+			err = nil
+		}
+		if err == nil {
+			logrus.Debugf("getReaderAtOffset old_%d", off)
+			return rc, nil
+		}
+		rc.cur = -1
+	}
+	logrus.Debugf("getReaderAtOffset new_%d", off)
+
+	// a Range request must not exceed the file size; some cloud drives can't handle that and return the whole file instead
+	reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: r.ss.GetSize() - off})
+	if err != nil {
+		return nil, err
+	}
+	rc = &readerCur{reader: reader, cur: off}
+	r.readers = append(r.readers, rc)
+	return rc, nil
+}
+
+func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (int, error) {
+	if off == 0 && r.headCache != nil {
+		return r.headCache.read(p)
+	}
+	rc, err := r.getReaderAtOffset(off)
+	if err != nil {
+		return 0, err
+	}
+	n, num := 0, 0
+	for num < len(p) {
+		n, err = rc.reader.Read(p[num:])
+		rc.cur += int64(n)
+		num += n
+		if err == nil {
+			continue
+		}
+		if err == io.EOF {
+			// io.EOF means the reader has been fully consumed
+			rc.cur = -1
+			// the yeka/zip package does not handle EOF, so we stay compatible with it
+			// https://github.com/yeka/zip/blob/03d6312748a9d6e0bc0c9a7275385c09f06d9c14/reader.go#L433
+			if num == len(p) {
+				err = nil
+			}
+		}
+		break
+	}
+	return num, err
+}
+
+func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
+	switch whence {
+	case io.SeekStart:
+	case io.SeekCurrent:
+		if offset == 0 {
+			return r.masterOff, nil
+		}
+		offset += r.masterOff
+	case io.SeekEnd:
+		offset += r.ss.GetSize()
+	default:
+		return 0, errs.NotSupport
+	}
+	if offset < 0 {
+		return r.masterOff, errors.New("invalid seek: negative position")
+	}
+	if offset > r.ss.GetSize() {
+		return r.masterOff, io.EOF
+	}
+	r.masterOff = offset
+	return offset, nil
+}
+
+func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
+	if r.masterOff == 0 && r.headCache != nil {
+		return r.headCache.read(p)
+	}
+	rc, err := r.getReaderAtOffset(r.masterOff)
+	if err != nil {
+		return 0, err
+	}
+	n, err = rc.reader.Read(p)
+	rc.cur += int64(n)
+	r.masterOff += int64(n)
+	return n, err
+}
+
+func (r *RangeReadReadAtSeeker) Close() error {
+	return r.ss.Close()
+}
+
+type FileReadAtSeeker struct {
+	ss *SeekableStream
+}
+
+func (f *FileReadAtSeeker) GetRawStream() *SeekableStream {
+	return f.ss
+}
+
+func (f *FileReadAtSeeker) Read(p []byte) (n int, err error) {
+	return f.ss.mFile.Read(p)
+}
+
+func (f *FileReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) {
+	return f.ss.mFile.ReadAt(p, off)
+}
+
+func (f *FileReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
+	return f.ss.mFile.Seek(offset, whence)
+}
+
+func (f *FileReadAtSeeker) Close() error {
+	return f.ss.Close()
 }
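RangeReadReadAtSeeker above turns a stream that can only be opened as sequential ranges into an io.ReaderAt/io.ReadSeeker by tracking a cursor per open reader: an in-order ReadAt reuses an already open reader (discarding small forward gaps) instead of issuing a fresh range request. A standalone sketch of just that cursor-reuse idea, with hypothetical names and a counter standing in for the cost of opening a new range:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// openRange stands in for SeekableStream.RangeRead: each call is assumed to
// be expensive (e.g. a fresh HTTP Range request), so we count invocations.
var rangeCalls int

func openRange(data string, off int64) io.Reader {
	rangeCalls++
	return strings.NewReader(data[off:])
}

// cursorReaderAt reuses one sequential reader as long as ReadAt offsets
// arrive in order, loosely mirroring the readerCur bookkeeping in the patch.
type cursorReaderAt struct {
	data string // stands in for the remote object
	r    io.Reader
	cur  int64
}

func (c *cursorReaderAt) ReadAt(p []byte, off int64) (int, error) {
	if c.r == nil || off < c.cur { // backwards: must reopen
		c.r = openRange(c.data, off)
		c.cur = off
	} else if off > c.cur { // small forward gap: discard instead of reopening
		n, err := io.CopyN(io.Discard, c.r, off-c.cur)
		c.cur += n
		if err != nil {
			return 0, err
		}
	}
	n, err := io.ReadFull(c.r, p)
	c.cur += int64(n)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		err = nil // the real ReadAt above is similarly lenient at EOF (yeka/zip compatibility)
	}
	return n, err
}

func main() {
	c := &cursorReaderAt{data: "0123456789abcdef"}
	buf := make([]byte, 4)
	_, _ = c.ReadAt(buf, 0)
	fmt.Println(string(buf), rangeCalls) // 0123 1
	_, _ = c.ReadAt(buf, 4)              // in order: reuses the open reader
	fmt.Println(string(buf), rangeCalls) // 4567 1
	_, _ = c.ReadAt(buf, 2)              // backwards: reopens
	fmt.Println(string(buf), rangeCalls) // 2345 2
}
```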
diff --git a/internal/stream/util.go b/internal/stream/util.go
index 7d2b7ef7..5b935a90 100644
--- a/internal/stream/util.go
+++ b/internal/stream/util.go
@@ -2,14 +2,15 @@ package stream
 
 import (
 	"context"
+	"encoding/hex"
 	"fmt"
 	"io"
 	"net/http"
 
-	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/net"
 	"github.com/alist-org/alist/v3/pkg/http_range"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	log "github.com/sirupsen/logrus"
 )
 
@@ -17,10 +18,9 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl
 	if len(link.URL) == 0 {
 		return nil, fmt.Errorf("can't create RangeReadCloser since URL is empty in link")
 	}
-	//remoteClosers := utils.EmptyClosers()
 	rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
 		if link.Concurrency != 0 || link.PartSize != 0 {
-			header := net.ProcessHeader(http.Header{}, link.Header)
+			header := net.ProcessHeader(nil, link.Header)
 			down := net.NewDownloader(func(d *net.Downloader) {
 				d.Concurrency = link.Concurrency
 				d.PartSize = link.PartSize
@@ -32,44 +32,36 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl
 				HeaderRef: header,
 			}
 			rc, err := down.Download(ctx, req)
-			if err != nil {
-				return nil, errs.NewErr(err, "GetReadCloserFromLink failed")
-			}
-			return rc, nil
+			return rc, err
 		}
-		if len(link.URL) > 0 {
-			response, err := RequestRangedHttp(ctx, link, r.Start, r.Length)
-			if err != nil {
-				if response == nil {
-					return nil, fmt.Errorf("http request failure, err:%s", err)
-				}
-				return nil, fmt.Errorf("http request failure,status: %d err:%s", response.StatusCode, err)
+		response, err := RequestRangedHttp(ctx, link, r.Start, r.Length)
+		if err != nil {
+			if response == nil {
+				return nil, fmt.Errorf("http request failure, err:%s", err)
 			}
-			if r.Start == 0 && (r.Length == -1 || r.Length == size) || response.StatusCode == http.StatusPartialContent ||
-				checkContentRange(&response.Header, r.Start) {
-				return response.Body, nil
-			} else if response.StatusCode == http.StatusOK {
-				log.Warnf("remote http server not supporting range request, expect low perfromace!")
-				readCloser, err := net.GetRangedHttpReader(response.Body, r.Start, r.Length)
-				if err != nil {
-					return nil, err
-				}
-				return readCloser, nil
-
-			}
-
+			return nil, err
+		}
+		if r.Start == 0 && (r.Length == -1 || r.Length == size) || response.StatusCode == http.StatusPartialContent ||
+			checkContentRange(&response.Header, r.Start) {
 			return response.Body, nil
+		} else if response.StatusCode == http.StatusOK {
+			log.Warnf("remote http server does not support range requests, expect low performance!")
+			readCloser, err := net.GetRangedHttpReader(response.Body, r.Start, r.Length)
+			if err != nil {
+				return nil, err
+			}
+			return readCloser, nil
 		}
-		return nil, errs.NotSupport
+		return response.Body, nil
 	}
 	resultRangeReadCloser := model.RangeReadCloser{RangeReader: rangeReaderFunc}
 	return &resultRangeReadCloser, nil
 }
 
 func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int64) (*http.Response, error) {
-	header := net.ProcessHeader(http.Header{}, link.Header)
+	header := net.ProcessHeader(nil, link.Header)
 	header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start:
offset, Length: length}, header) return net.RequestHttp(ctx, "GET", header, link.URL) @@ -86,3 +78,64 @@ func checkContentRange(header *http.Header, offset int64) bool { } return false } + +type ReaderWithCtx struct { + io.Reader + Ctx context.Context +} + +func (r *ReaderWithCtx) Read(p []byte) (n int, err error) { + if utils.IsCanceled(r.Ctx) { + return 0, r.Ctx.Err() + } + return r.Reader.Read(p) +} + +func (r *ReaderWithCtx) Close() error { + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +func CacheFullInTempFileAndUpdateProgress(stream model.FileStreamer, up model.UpdateProgress) (model.File, error) { + if cache := stream.GetFile(); cache != nil { + up(100) + return cache, nil + } + tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{ + Reader: stream, + UpdateProgress: up, + }, stream.GetSize()) + if err == nil { + stream.SetTmpFile(tmpF) + } + return tmpF, err +} + +func CacheFullInTempFileAndWriter(stream model.FileStreamer, w io.Writer) (model.File, error) { + if cache := stream.GetFile(); cache != nil { + _, err := cache.Seek(0, io.SeekStart) + if err == nil { + _, err = utils.CopyWithBuffer(w, cache) + if err == nil { + _, err = cache.Seek(0, io.SeekStart) + } + } + return cache, err + } + tmpF, err := utils.CreateTempFile(io.TeeReader(stream, w), stream.GetSize()) + if err == nil { + stream.SetTmpFile(tmpF) + } + return tmpF, err +} + +func CacheFullInTempFileAndHash(stream model.FileStreamer, hashType *utils.HashType, params ...any) (model.File, string, error) { + h := hashType.NewFunc(params...) + tmpF, err := CacheFullInTempFileAndWriter(stream, h) + if err != nil { + return nil, "", err + } + return tmpF, hex.EncodeToString(h.Sum(nil)), err +} diff --git a/internal/task/base.go b/internal/task/base.go new file mode 100644 index 00000000..c3703bd1 --- /dev/null +++ b/internal/task/base.go @@ -0,0 +1,90 @@ +package task + +import ( + "context" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/model" + "github.com/xhofe/tache" + "sync" + "time" +) + +type TaskExtension struct { + tache.Base + ctx context.Context + ctxInitMutex sync.Mutex + Creator *model.User + startTime *time.Time + endTime *time.Time + totalBytes int64 +} + +func (t *TaskExtension) SetCreator(creator *model.User) { + t.Creator = creator + t.Persist() +} + +func (t *TaskExtension) GetCreator() *model.User { + return t.Creator +} + +func (t *TaskExtension) SetStartTime(startTime time.Time) { + t.startTime = &startTime +} + +func (t *TaskExtension) GetStartTime() *time.Time { + return t.startTime +} + +func (t *TaskExtension) SetEndTime(endTime time.Time) { + t.endTime = &endTime +} + +func (t *TaskExtension) GetEndTime() *time.Time { + return t.endTime +} + +func (t *TaskExtension) ClearEndTime() { + t.endTime = nil +} + +func (t *TaskExtension) SetTotalBytes(totalBytes int64) { + t.totalBytes = totalBytes +} + +func (t *TaskExtension) GetTotalBytes() int64 { + return t.totalBytes +} + +func (t *TaskExtension) Ctx() context.Context { + if t.ctx == nil { + t.ctxInitMutex.Lock() + if t.ctx == nil { + t.ctx = context.WithValue(t.Base.Ctx(), "user", t.Creator) + } + t.ctxInitMutex.Unlock() + } + return t.ctx +} + +func (t *TaskExtension) ReinitCtx() { + if !conf.Conf.Tasks.AllowRetryCanceled { + return + } + select { + case <-t.Base.Ctx().Done(): + ctx, cancel := context.WithCancel(context.Background()) + t.SetCtx(ctx) + t.SetCancelFunc(cancel) + t.ctx = nil + default: + } +} + +type TaskExtensionInfo interface { + tache.TaskWithInfo + 
GetCreator() *model.User + GetStartTime() *time.Time + GetEndTime() *time.Time + GetTotalBytes() int64 +} diff --git a/internal/task/manager.go b/internal/task/manager.go new file mode 100644 index 00000000..3caa685a --- /dev/null +++ b/internal/task/manager.go @@ -0,0 +1,20 @@ +package task + +import "github.com/xhofe/tache" + +type Manager[T tache.Task] interface { + Add(task T) + Cancel(id string) + CancelAll() + CancelByCondition(condition func(task T) bool) + GetAll() []T + GetByID(id string) (T, bool) + GetByState(state ...tache.State) []T + GetByCondition(condition func(task T) bool) []T + Remove(id string) + RemoveAll() + RemoveByState(state ...tache.State) + RemoveByCondition(condition func(task T) bool) + Retry(id string) + RetryAllFailed() +} diff --git a/pkg/utils/hash.go b/pkg/utils/hash.go index fa06bcc2..a281dd4e 100644 --- a/pkg/utils/hash.go +++ b/pkg/utils/hash.go @@ -10,6 +10,7 @@ import ( "errors" "hash" "io" + "iter" "github.com/alist-org/alist/v3/internal/errs" log "github.com/sirupsen/logrus" @@ -226,3 +227,13 @@ func (hi HashInfo) GetHash(ht *HashType) string { func (hi HashInfo) Export() map[*HashType]string { return hi.h } + +func (hi HashInfo) All() iter.Seq2[*HashType, string] { + return func(yield func(*HashType, string) bool) { + for hashType, hashValue := range hi.h { + if !yield(hashType, hashValue) { + return + } + } + } +} diff --git a/pkg/utils/path.go b/pkg/utils/path.go index c0793a3e..135f8e4e 100644 --- a/pkg/utils/path.go +++ b/pkg/utils/path.go @@ -45,7 +45,7 @@ func IsSubPath(path string, subPath string) bool { func Ext(path string) string { ext := stdpath.Ext(path) - if strings.HasPrefix(ext, ".") { + if len(ext) > 0 && ext[0] == '.' { ext = ext[1:] } return strings.ToLower(ext) diff --git a/pkg/utils/random/random.go b/pkg/utils/random/random.go index 65fbf14a..c3f3dd48 100644 --- a/pkg/utils/random/random.go +++ b/pkg/utils/random/random.go @@ -1,20 +1,27 @@ package random import ( - "math/rand" + "crypto/rand" + "math/big" + mathRand "math/rand" "time" "github.com/google/uuid" ) -var Rand *rand.Rand +var Rand *mathRand.Rand const letterBytes = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" func String(n int) string { b := make([]byte, n) + letterLen := big.NewInt(int64(len(letterBytes))) for i := range b { - b[i] = letterBytes[Rand.Intn(len(letterBytes))] + idx, err := rand.Int(rand.Reader, letterLen) + if err != nil { + panic(err) + } + b[i] = letterBytes[idx.Int64()] } return string(b) } @@ -24,10 +31,10 @@ func Token() string { } func RangeInt64(left, right int64) int64 { - return rand.Int63n(left+right) - left + return mathRand.Int63n(left+right) - left } func init() { - s := rand.NewSource(time.Now().UnixNano()) - Rand = rand.New(s) + s := mathRand.NewSource(time.Now().UnixNano()) + Rand = mathRand.New(s) } diff --git a/pkg/utils/time.go b/pkg/utils/time.go index aa706928..36573b4e 100644 --- a/pkg/utils/time.go +++ b/pkg/utils/time.go @@ -34,31 +34,36 @@ func NewDebounce2(interval time.Duration, f func()) func() { if timer == nil { timer = time.AfterFunc(interval, f) } - (*time.Timer)(timer).Reset(interval) + timer.Reset(interval) } } func NewThrottle(interval time.Duration) func(func()) { var lastCall time.Time - + var lock sync.Mutex return func(fn func()) { + lock.Lock() + defer lock.Unlock() + now := time.Now() - if now.Sub(lastCall) < interval { - return + if now.Sub(lastCall) >= interval { + lastCall = now + go fn() } - time.AfterFunc(interval, fn) - lastCall = now } } func NewThrottle2(interval time.Duration, fn 
func()) func() { var lastCall time.Time + var lock sync.Mutex return func() { + lock.Lock() + defer lock.Unlock() + now := time.Now() - if now.Sub(lastCall) < interval { - return + if now.Sub(lastCall) >= interval { + lastCall = now + go fn() } - time.AfterFunc(interval, fn) - lastCall = now } } diff --git a/server/common/base.go b/server/common/base.go index eb6ef2b8..11a28d25 100644 --- a/server/common/base.go +++ b/server/common/base.go @@ -12,16 +12,16 @@ import ( func GetApiUrl(r *http.Request) string { api := conf.Conf.SiteURL if strings.HasPrefix(api, "http") { - return api + return strings.TrimSuffix(api, "/") } if r != nil { protocol := "http" if r.TLS != nil || r.Header.Get("X-Forwarded-Proto") == "https" { protocol = "https" } - host := r.Host - if r.Header.Get("X-Forwarded-Host") != "" { - host = r.Header.Get("X-Forwarded-Host") + host := r.Header.Get("X-Forwarded-Host") + if host == "" { + host = r.Host } api = fmt.Sprintf("%s://%s", protocol, stdpath.Join(host, api)) } diff --git a/server/common/common.go b/server/common/common.go index 28d2da44..33ae704e 100644 --- a/server/common/common.go +++ b/server/common/common.go @@ -1,6 +1,8 @@ package common import ( + "context" + "net/http" "strings" "github.com/alist-org/alist/v3/cmd/flags" @@ -66,17 +68,32 @@ func ErrorStrResp(c *gin.Context, str string, code int, l ...bool) { } func SuccessResp(c *gin.Context, data ...interface{}) { - if len(data) == 0 { - c.JSON(200, Resp[interface{}]{ - Code: 200, - Message: "success", - Data: nil, - }) - return + SuccessWithMsgResp(c, "success", data...) +} + +func SuccessWithMsgResp(c *gin.Context, msg string, data ...interface{}) { + var respData interface{} + if len(data) > 0 { + respData = data[0] } + c.JSON(200, Resp[interface{}]{ Code: 200, - Message: "success", - Data: data[0], + Message: msg, + Data: respData, }) } + +func Pluralize(count int, singular, plural string) string { + if count == 1 { + return singular + } + return plural +} + +func GetHttpReq(ctx context.Context) *http.Request { + if c, ok := ctx.(*gin.Context); ok { + return c.Request + } + return nil +} diff --git a/server/common/proxy.go b/server/common/proxy.go index 10923613..ca7f6325 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -6,6 +6,10 @@ import ( "io" "net/http" "net/url" + "os" + "strings" + + "maps" "github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/net" @@ -18,27 +22,36 @@ import ( func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error { if link.MFile != nil { defer link.MFile.Close() - attachFileName(w, file) + attachHeader(w, file) contentType := link.Header.Get("Content-Type") if contentType != "" { w.Header().Set("Content-Type", contentType) } - http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile) + mFile := link.MFile + if _, ok := mFile.(*os.File); !ok { + mFile = &stream.RateLimitFile{ + File: mFile, + Limiter: stream.ServerDownloadLimit, + Ctx: r.Context(), + } + } + http.ServeContent(w, r, file.GetName(), file.ModTime(), mFile) return nil } else if link.RangeReadCloser != nil { - attachFileName(w, file) - net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser.RangeRead) - defer func() { - _ = link.RangeReadCloser.Close() - }() - return nil + attachHeader(w, file) + return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{ + RangeReadCloserIF: link.RangeReadCloser, + Limiter: stream.ServerDownloadLimit, + }) } 
else if link.Concurrency != 0 || link.PartSize != 0 {
-		attachFileName(w, file)
+		attachHeader(w, file)
 		size := file.GetSize()
-		//var finalClosers model.Closers
-		finalClosers := utils.EmptyClosers()
-		header := net.ProcessHeader(r.Header, link.Header)
 		rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+			requestHeader := ctx.Value("request_header")
+			if requestHeader == nil {
+				requestHeader = http.Header{}
+			}
+			header := net.ProcessHeader(requestHeader.(http.Header), link.Header)
 			down := net.NewDownloader(func(d *net.Downloader) {
 				d.Concurrency = link.Concurrency
 				d.PartSize = link.PartSize
@@ -50,39 +63,52 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
 				HeaderRef: header,
 			}
 			rc, err := down.Download(ctx, req)
-			finalClosers.Add(rc)
 			return rc, err
 		}
-		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), rangeReader)
-		defer finalClosers.Close()
-		return nil
+		return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{
+			RangeReadCloserIF: &model.RangeReadCloser{RangeReader: rangeReader},
+			Limiter:           stream.ServerDownloadLimit,
+		})
 	} else {
 		//transparent proxy
 		header := net.ProcessHeader(r.Header, link.Header)
-		res, err := net.RequestHttp(context.Background(), r.Method, header, link.URL)
+		res, err := net.RequestHttp(r.Context(), r.Method, header, link.URL)
 		if err != nil {
 			return err
 		}
 		defer res.Body.Close()
-		for h, v := range res.Header {
-			w.Header()[h] = v
-		}
+		maps.Copy(w.Header(), res.Header)
 		w.WriteHeader(res.StatusCode)
 		if r.Method == http.MethodHead {
 			return nil
 		}
-		_, err = io.Copy(w, res.Body)
-		if err != nil {
-			return err
-		}
-		return nil
+		_, err = utils.CopyWithBuffer(w, &stream.RateLimitReader{
+			Reader:  res.Body,
+			Limiter: stream.ServerDownloadLimit,
+			Ctx:     r.Context(),
+		})
+		return err
 	}
 }
 
-func attachFileName(w http.ResponseWriter, file model.Obj) {
+func attachHeader(w http.ResponseWriter, file model.Obj) {
 	fileName := file.GetName()
 	w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, fileName, url.PathEscape(fileName)))
 	w.Header().Set("Content-Type", utils.GetMimeType(fileName))
+	w.Header().Set("Etag", GetEtag(file))
+}
+func GetEtag(file model.Obj) string {
+	hash := ""
+	for _, v := range file.GetHash().Export() {
+		if strings.Compare(v, hash) > 0 {
+			hash = v
+		}
+	}
+	if len(hash) > 0 {
+		return fmt.Sprintf(`"%s"`, hash)
+	}
+	// same scheme as nginx
+	return fmt.Sprintf(`"%x-%x"`, file.ModTime().Unix(), file.GetSize())
 }
 
 var NoProxyRange = &model.RangeReadCloser{}
@@ -102,3 +128,29 @@ func ProxyRange(link *model.Link, size int64) {
 			link.RangeReadCloser = nil
 		}
 	}
 }
+
+type InterceptResponseWriter struct {
+	http.ResponseWriter
+	io.Writer
+}
+
+func (iw *InterceptResponseWriter) Write(p []byte) (int, error) {
+	return iw.Writer.Write(p)
+}
+
+type WrittenResponseWriter struct {
+	http.ResponseWriter
+	written bool
+}
+
+func (ww *WrittenResponseWriter) Write(p []byte) (int, error) {
+	n, err := ww.ResponseWriter.Write(p)
+	if !ww.written && n > 0 {
+		ww.written = true
+	}
+	return n, err
+}
+
+func (ww *WrittenResponseWriter) IsWritten() bool {
+	return ww.written
+}
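GetEtag above prefers the strongest available upload hash and otherwise falls back to a weak validator in the same `"%x-%x"` shape nginx uses (hex modification time, hex size). A tiny standalone illustration with made-up values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Fallback ETag in the nginx style: hex mtime, "-", hex size.
	modTime := time.Unix(1700000000, 0)
	size := int64(4096)
	etag := fmt.Sprintf(`"%x-%x"`, modTime.Unix(), size)
	fmt.Println(etag) // "6553f100-1000"
}
```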
"github.com/gin-gonic/gin" @@ -15,7 +16,7 @@ func _pprof(g *gin.RouterGroup) { } func debug(g *gin.RouterGroup) { - g.GET("/path/*path", middlewares.Down, func(ctx *gin.Context) { + g.GET("/path/*path", middlewares.Down(sign.Verify), func(ctx *gin.Context) { rawPath := ctx.MustGet("path").(string) ctx.JSON(200, gin.H{ "path": rawPath, diff --git a/server/ftp.go b/server/ftp.go new file mode 100644 index 00000000..4d507b68 --- /dev/null +++ b/server/ftp.go @@ -0,0 +1,288 @@ +package server + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/server/ftp" + "math/rand" + "net" + "net/http" + "os" + "strconv" + "strings" + "sync" +) + +type FtpMainDriver struct { + settings *ftpserver.Settings + proxyHeader *http.Header + clients map[uint32]ftpserver.ClientContext + shutdownLock sync.RWMutex + isShutdown bool + tlsConfig *tls.Config +} + +func NewMainDriver() (*FtpMainDriver, error) { + header := &http.Header{} + header.Add("User-Agent", setting.GetStr(conf.FTPProxyUserAgent)) + transferType := ftpserver.TransferTypeASCII + if conf.Conf.FTP.DefaultTransferBinary { + transferType = ftpserver.TransferTypeBinary + } + activeConnCheck := ftpserver.IPMatchDisabled + if conf.Conf.FTP.EnableActiveConnIPCheck { + activeConnCheck = ftpserver.IPMatchRequired + } + pasvConnCheck := ftpserver.IPMatchDisabled + if conf.Conf.FTP.EnablePasvConnIPCheck { + pasvConnCheck = ftpserver.IPMatchRequired + } + tlsRequired := ftpserver.ClearOrEncrypted + if setting.GetBool(conf.FTPImplicitTLS) { + tlsRequired = ftpserver.ImplicitEncryption + } else if setting.GetBool(conf.FTPMandatoryTLS) { + tlsRequired = ftpserver.MandatoryEncryption + } + tlsConf, err := getTlsConf(setting.GetStr(conf.FTPTLSPrivateKeyPath), setting.GetStr(conf.FTPTLSPublicCertPath)) + if err != nil && tlsRequired != ftpserver.ClearOrEncrypted { + return nil, fmt.Errorf("FTP mandatory TLS has been enabled, but the certificate failed to load: %w", err) + } + return &FtpMainDriver{ + settings: &ftpserver.Settings{ + ListenAddr: conf.Conf.FTP.Listen, + PublicHost: lookupIP(setting.GetStr(conf.FTPPublicHost)), + PassiveTransferPortGetter: newPortMapper(setting.GetStr(conf.FTPPasvPortMap)), + FindPasvPortAttempts: conf.Conf.FTP.FindPasvPortAttempts, + ActiveTransferPortNon20: conf.Conf.FTP.ActiveTransferPortNon20, + IdleTimeout: conf.Conf.FTP.IdleTimeout, + ConnectionTimeout: conf.Conf.FTP.ConnectionTimeout, + DisableMLSD: false, + DisableMLST: false, + DisableMFMT: true, + Banner: setting.GetStr(conf.Announcement), + TLSRequired: tlsRequired, + DisableLISTArgs: false, + DisableSite: false, + DisableActiveMode: conf.Conf.FTP.DisableActiveMode, + EnableHASH: false, + DisableSTAT: false, + DisableSYST: false, + EnableCOMB: false, + DefaultTransferType: transferType, + ActiveConnectionsCheck: activeConnCheck, + PasvConnectionsCheck: pasvConnCheck, + SiteHandlers: map[string]ftpserver.SiteHandler{ + "SIZE": ftp.HandleSIZE, + }, + }, + proxyHeader: header, + clients: make(map[uint32]ftpserver.ClientContext), + shutdownLock: sync.RWMutex{}, + isShutdown: false, + tlsConfig: tlsConf, + }, nil +} + +func (d *FtpMainDriver) GetSettings() (*ftpserver.Settings, error) { + return d.settings, nil +} + +func (d *FtpMainDriver) 
+
+func (d *FtpMainDriver) ClientConnected(cc ftpserver.ClientContext) (string, error) {
+	if d.isShutdown || !d.shutdownLock.TryRLock() {
+		return "", errors.New("server has shutdown")
+	}
+	defer d.shutdownLock.RUnlock()
+	d.clients[cc.ID()] = cc
+	return "AList FTP Endpoint", nil
+}
+
+func (d *FtpMainDriver) ClientDisconnected(cc ftpserver.ClientContext) {
+	err := cc.Close()
+	if err != nil {
+		utils.Log.Errorf("failed to close client: %v", err)
+	}
+	delete(d.clients, cc.ID())
+}
+
+func (d *FtpMainDriver) AuthUser(cc ftpserver.ClientContext, user, pass string) (ftpserver.ClientDriver, error) {
+	var userObj *model.User
+	var err error
+	if user == "anonymous" || user == "guest" {
+		userObj, err = op.GetGuest()
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		userObj, err = op.GetUserByName(user)
+		if err != nil {
+			return nil, err
+		}
+		passHash := model.StaticHash(pass)
+		if err = userObj.ValidatePwdStaticHash(passHash); err != nil {
+			return nil, err
+		}
+	}
+	if userObj.Disabled || !userObj.CanFTPAccess() {
+		return nil, errors.New("user is not allowed to access via FTP")
+	}
+
+	ctx := context.Background()
+	ctx = context.WithValue(ctx, "user", userObj)
+	if user == "anonymous" || user == "guest" {
+		ctx = context.WithValue(ctx, "meta_pass", pass)
+	} else {
+		ctx = context.WithValue(ctx, "meta_pass", "")
+	}
+	ctx = context.WithValue(ctx, "client_ip", cc.RemoteAddr().String())
+	ctx = context.WithValue(ctx, "proxy_header", d.proxyHeader)
+	return ftp.NewAferoAdapter(ctx), nil
+}
+
+func (d *FtpMainDriver) GetTLSConfig() (*tls.Config, error) {
+	if d.tlsConfig == nil {
+		return nil, errors.New("TLS config not provided")
+	}
+	return d.tlsConfig, nil
+}
+
+func (d *FtpMainDriver) Stop() {
+	d.isShutdown = true
+	d.shutdownLock.Lock()
+	defer d.shutdownLock.Unlock()
+	for _, value := range d.clients {
+		_ = value.Close()
+	}
+}
+
+func lookupIP(host string) string {
+	if host == "" || net.ParseIP(host) != nil {
+		return host
+	}
+	ips, err := net.LookupIP(host)
+	if err != nil || len(ips) == 0 {
+		utils.Log.Fatalf("the given FTP public host is invalid, and the default value will be used: %v", err)
+		return ""
+	}
+	for _, ip := range ips {
+		if ip.To4() != nil {
+			return ip.String()
+		}
+	}
+	v6 := ips[0].String()
+	utils.Log.Warnf("no IPv4 record found; %s will be used as the public host, which might not work.", v6)
+	return v6
+}
+
+func newPortMapper(str string) ftpserver.PasvPortGetter {
+	if str == "" {
+		return nil
+	}
+	pasvPortMappers := strings.Split(strings.Replace(str, "\n", ",", -1), ",")
+	type group struct {
+		ExposedStart  int
+		ListenedStart int
+		Length        int
+	}
+	groups := make([]group, len(pasvPortMappers))
+	totalLength := 0
+	convertToPorts := func(str string) (int, int, error) {
+		start, end, multi := strings.Cut(str, "-")
+		if multi {
+			si, err := strconv.Atoi(start)
+			if err != nil {
+				return 0, 0, err
+			}
+			ei, err := strconv.Atoi(end)
+			if err != nil {
+				return 0, 0, err
+			}
+			if ei < si || ei < 1024 || si < 1024 || ei > 65535 || si > 65535 {
+				return 0, 0, errors.New("invalid port")
+			}
+			return si, ei - si + 1, nil
+		} else {
+			ret, err := strconv.Atoi(str)
+			if err != nil {
+				return 0, 0, err
+			} else {
+				return ret, 1, nil
+			}
+		}
+	}
+	for i, mapper := range pasvPortMappers {
+		var err error
+		exposed, listened, mapped := strings.Cut(mapper, ":")
+		for {
+			if mapped {
+				var es, ls, el, ll int
+				es, el, err = convertToPorts(exposed)
+				if err != nil {
+					break
+				}
+				ls, ll, err = convertToPorts(listened)
+				if err != nil {
+					break
+				}
+				if el != ll {
+					err = errors.New("the
number of exposed ports and listened ports does not match") + break + } + groups[i].ExposedStart = es + groups[i].ListenedStart = ls + groups[i].Length = el + totalLength += el + } else { + var start, length int + start, length, err = convertToPorts(mapper) + groups[i].ExposedStart = start + groups[i].ListenedStart = start + groups[i].Length = length + totalLength += length + } + break + } + if err != nil { + utils.Log.Fatalf("failed to convert FTP PASV port mapper %s: %v, the port mapper will be ignored.", mapper, err) + return nil + } + } + return func() (int, int, bool) { + idxPort := rand.Intn(totalLength) + for _, g := range groups { + if idxPort >= g.Length { + idxPort -= g.Length + } else { + return g.ExposedStart + idxPort, g.ListenedStart + idxPort, true + } + } + // unreachable + return 0, 0, false + } +} + +func getTlsConf(keyPath, certPath string) (*tls.Config, error) { + if keyPath == "" || certPath == "" { + return nil, errors.New("private key or certificate is not provided") + } + cert, err := os.ReadFile(certPath) + if err != nil { + return nil, err + } + key, err := os.ReadFile(keyPath) + if err != nil { + return nil, err + } + tlsCert, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + return &tls.Config{Certificates: []tls.Certificate{tlsCert}}, nil +} diff --git a/server/ftp/afero.go b/server/ftp/afero.go new file mode 100644 index 00000000..75ae2e43 --- /dev/null +++ b/server/ftp/afero.go @@ -0,0 +1,121 @@ +package ftp + +import ( + "context" + "errors" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/spf13/afero" + "os" + "time" +) + +type AferoAdapter struct { + ctx context.Context + nextFileSize int64 +} + +func NewAferoAdapter(ctx context.Context) *AferoAdapter { + return &AferoAdapter{ctx: ctx} +} + +func (a *AferoAdapter) Create(_ string) (afero.File, error) { + // See also GetHandle + return nil, errs.NotImplement +} + +func (a *AferoAdapter) Mkdir(name string, _ os.FileMode) error { + return Mkdir(a.ctx, name) +} + +func (a *AferoAdapter) MkdirAll(path string, perm os.FileMode) error { + return a.Mkdir(path, perm) +} + +func (a *AferoAdapter) Open(_ string) (afero.File, error) { + // See also GetHandle and ReadDir + return nil, errs.NotImplement +} + +func (a *AferoAdapter) OpenFile(_ string, _ int, _ os.FileMode) (afero.File, error) { + // See also GetHandle + return nil, errs.NotImplement +} + +func (a *AferoAdapter) Remove(name string) error { + return Remove(a.ctx, name) +} + +func (a *AferoAdapter) RemoveAll(path string) error { + return a.Remove(path) +} + +func (a *AferoAdapter) Rename(oldName, newName string) error { + return Rename(a.ctx, oldName, newName) +} + +func (a *AferoAdapter) Stat(name string) (os.FileInfo, error) { + return Stat(a.ctx, name) +} + +func (a *AferoAdapter) Name() string { + return "AList FTP Endpoint" +} + +func (a *AferoAdapter) Chmod(_ string, _ os.FileMode) error { + return errs.NotSupport +} + +func (a *AferoAdapter) Chown(_ string, _, _ int) error { + return errs.NotSupport +} + +func (a *AferoAdapter) Chtimes(_ string, _ time.Time, _ time.Time) error { + return errs.NotSupport +} + +func (a *AferoAdapter) ReadDir(name string) ([]os.FileInfo, error) { + return List(a.ctx, name) +} + +func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserver.FileTransfer, error) { + fileSize := a.nextFileSize + a.nextFileSize = 0 
+ if (flags & os.O_SYNC) != 0 { + return nil, errs.NotSupport + } + if (flags & os.O_APPEND) != 0 { + return nil, errs.NotSupport + } + user := a.ctx.Value("user").(*model.User) + path, err := user.JoinPath(name) + if err != nil { + return nil, err + } + _, err = fs.Get(a.ctx, path, &fs.GetArgs{}) + exists := err == nil + if (flags&os.O_CREATE) == 0 && !exists { + return nil, errs.ObjectNotFound + } + if (flags&os.O_EXCL) != 0 && exists { + return nil, errors.New("file already exists") + } + if (flags & os.O_WRONLY) != 0 { + if offset != 0 { + return nil, errs.NotSupport + } + trunc := (flags & os.O_TRUNC) != 0 + if fileSize > 0 { + return OpenUploadWithLength(a.ctx, path, trunc, fileSize) + } else { + return OpenUpload(a.ctx, path, trunc) + } + } + return OpenDownload(a.ctx, path, offset) +} + +func (a *AferoAdapter) SetNextFileSize(size int64) { + a.nextFileSize = size +} diff --git a/server/ftp/fsmanage.go b/server/ftp/fsmanage.go new file mode 100644 index 00000000..fb03c1b9 --- /dev/null +++ b/server/ftp/fsmanage.go @@ -0,0 +1,82 @@ +package ftp + +import ( + "context" + "fmt" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/server/common" + "github.com/pkg/errors" + stdpath "path" +) + +func Mkdir(ctx context.Context, path string) error { + user := ctx.Value("user").(*model.User) + reqPath, err := user.JoinPath(path) + if err != nil { + return err + } + if !user.CanWrite() || !user.CanFTPManage() { + meta, err := op.GetNearestMeta(stdpath.Dir(reqPath)) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + return err + } + } + if !common.CanWrite(meta, reqPath) { + return errs.PermissionDenied + } + } + return fs.MakeDir(ctx, reqPath) +} + +func Remove(ctx context.Context, path string) error { + user := ctx.Value("user").(*model.User) + if !user.CanRemove() || !user.CanFTPManage() { + return errs.PermissionDenied + } + reqPath, err := user.JoinPath(path) + if err != nil { + return err + } + return fs.Remove(ctx, reqPath) +} + +func Rename(ctx context.Context, oldPath, newPath string) error { + user := ctx.Value("user").(*model.User) + srcPath, err := user.JoinPath(oldPath) + if err != nil { + return err + } + dstPath, err := user.JoinPath(newPath) + if err != nil { + return err + } + srcDir, srcBase := stdpath.Split(srcPath) + dstDir, dstBase := stdpath.Split(dstPath) + if srcDir == dstDir { + if !user.CanRename() || !user.CanFTPManage() { + return errs.PermissionDenied + } + return fs.Rename(ctx, srcPath, dstBase) + } else { + if !user.CanFTPManage() || !user.CanMove() || (srcBase != dstBase && !user.CanRename()) { + return errs.PermissionDenied + } + if err = fs.Move(ctx, srcPath, dstDir); err != nil { + if srcBase != dstBase { + return err + } + if _, err1 := fs.Copy(ctx, srcPath, dstDir); err1 != nil { + return fmt.Errorf("failed move for %+v, and failed try copying for %+v", err, err1) + } + return nil + } + if srcBase != dstBase { + return fs.Rename(ctx, stdpath.Join(dstDir, srcBase), dstBase) + } + return nil + } +} diff --git a/server/ftp/fsread.go b/server/ftp/fsread.go new file mode 100644 index 00000000..c051a19d --- /dev/null +++ b/server/ftp/fsread.go @@ -0,0 +1,163 @@ +package ftp + +import ( + "context" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + 
"github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/server/common" + "github.com/pkg/errors" + fs2 "io/fs" + "net/http" + "os" + "time" +) + +type FileDownloadProxy struct { + ftpserver.FileTransfer + reader stream.SStreamReadAtSeeker +} + +func OpenDownload(ctx context.Context, reqPath string, offset int64) (*FileDownloadProxy, error) { + user := ctx.Value("user").(*model.User) + meta, err := op.GetNearestMeta(reqPath) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + return nil, err + } + } + ctx = context.WithValue(ctx, "meta", meta) + if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) { + return nil, errs.PermissionDenied + } + + // directly use proxy + header := *(ctx.Value("proxy_header").(*http.Header)) + link, obj, err := fs.Link(ctx, reqPath, model.LinkArgs{ + IP: ctx.Value("client_ip").(string), + Header: header, + }) + if err != nil { + return nil, err + } + fileStream := stream.FileStream{ + Obj: obj, + Ctx: ctx, + } + ss, err := stream.NewSeekableStream(fileStream, link) + if err != nil { + return nil, err + } + reader, err := stream.NewReadAtSeeker(ss, offset) + if err != nil { + _ = ss.Close() + return nil, err + } + return &FileDownloadProxy{reader: reader}, nil +} + +func (f *FileDownloadProxy) Read(p []byte) (n int, err error) { + n, err = f.reader.Read(p) + if err != nil { + return + } + err = stream.ClientDownloadLimit.WaitN(f.reader.GetRawStream().Ctx, n) + return +} + +func (f *FileDownloadProxy) Write(p []byte) (n int, err error) { + return 0, errs.NotSupport +} + +func (f *FileDownloadProxy) Seek(offset int64, whence int) (int64, error) { + return f.reader.Seek(offset, whence) +} + +func (f *FileDownloadProxy) Close() error { + return f.reader.Close() +} + +type OsFileInfoAdapter struct { + obj model.Obj +} + +func (o *OsFileInfoAdapter) Name() string { + return o.obj.GetName() +} + +func (o *OsFileInfoAdapter) Size() int64 { + return o.obj.GetSize() +} + +func (o *OsFileInfoAdapter) Mode() fs2.FileMode { + var mode fs2.FileMode = 0755 + if o.IsDir() { + mode |= fs2.ModeDir + } + return mode +} + +func (o *OsFileInfoAdapter) ModTime() time.Time { + return o.obj.ModTime() +} + +func (o *OsFileInfoAdapter) IsDir() bool { + return o.obj.IsDir() +} + +func (o *OsFileInfoAdapter) Sys() any { + return o.obj +} + +func Stat(ctx context.Context, path string) (os.FileInfo, error) { + user := ctx.Value("user").(*model.User) + reqPath, err := user.JoinPath(path) + if err != nil { + return nil, err + } + meta, err := op.GetNearestMeta(reqPath) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + return nil, err + } + } + ctx = context.WithValue(ctx, "meta", meta) + if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) { + return nil, errs.PermissionDenied + } + obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{}) + if err != nil { + return nil, err + } + return &OsFileInfoAdapter{obj: obj}, nil +} + +func List(ctx context.Context, path string) ([]os.FileInfo, error) { + user := ctx.Value("user").(*model.User) + reqPath, err := user.JoinPath(path) + if err != nil { + return nil, err + } + meta, err := op.GetNearestMeta(reqPath) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + return nil, err + } + } + ctx = context.WithValue(ctx, "meta", meta) + if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) { + return nil, 
errs.PermissionDenied + } + objs, err := fs.List(ctx, reqPath, &fs.ListArgs{}) + if err != nil { + return nil, err + } + ret := make([]os.FileInfo, len(objs)) + for i, obj := range objs { + ret[i] = &OsFileInfoAdapter{obj: obj} + } + return ret, nil +} diff --git a/server/ftp/fsup.go b/server/ftp/fsup.go new file mode 100644 index 00000000..ee38b1bf --- /dev/null +++ b/server/ftp/fsup.go @@ -0,0 +1,218 @@ +package ftp + +import ( + "bytes" + "context" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/stream" + "github.com/alist-org/alist/v3/server/common" + "github.com/pkg/errors" + "io" + "net/http" + "os" + stdpath "path" + "time" +) + +type FileUploadProxy struct { + ftpserver.FileTransfer + buffer *os.File + path string + ctx context.Context + trunc bool +} + +func uploadAuth(ctx context.Context, path string) error { + user := ctx.Value("user").(*model.User) + meta, err := op.GetNearestMeta(stdpath.Dir(path)) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + return err + } + } + if !(common.CanAccess(user, meta, path, ctx.Value("meta_pass").(string)) && + ((user.CanFTPManage() && user.CanWrite()) || common.CanWrite(meta, stdpath.Dir(path)))) { + return errs.PermissionDenied + } + return nil +} + +func OpenUpload(ctx context.Context, path string, trunc bool) (*FileUploadProxy, error) { + err := uploadAuth(ctx, path) + if err != nil { + return nil, err + } + tmpFile, err := os.CreateTemp(conf.Conf.TempDir, "file-*") + if err != nil { + return nil, err + } + return &FileUploadProxy{buffer: tmpFile, path: path, ctx: ctx, trunc: trunc}, nil +} + +func (f *FileUploadProxy) Read(p []byte) (n int, err error) { + return 0, errs.NotSupport +} + +func (f *FileUploadProxy) Write(p []byte) (n int, err error) { + n, err = f.buffer.Write(p) + if err != nil { + return + } + err = stream.ClientUploadLimit.WaitN(f.ctx, n) + return +} + +func (f *FileUploadProxy) Seek(offset int64, whence int) (int64, error) { + return f.buffer.Seek(offset, whence) +} + +func (f *FileUploadProxy) Close() error { + dir, name := stdpath.Split(f.path) + size, err := f.buffer.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + if _, err := f.buffer.Seek(0, io.SeekStart); err != nil { + return err + } + arr := make([]byte, 512) + if _, err := f.buffer.Read(arr); err != nil { + return err + } + contentType := http.DetectContentType(arr) + if _, err := f.buffer.Seek(0, io.SeekStart); err != nil { + return err + } + if f.trunc { + _ = fs.Remove(f.ctx, f.path) + } + s := &stream.FileStream{ + Obj: &model.Object{ + Name: name, + Size: size, + Modified: time.Now(), + }, + Mimetype: contentType, + WebPutAsTask: true, + } + s.SetTmpFile(f.buffer) + _, err = fs.PutAsTask(f.ctx, dir, s) + return err +} + +type FileUploadWithLengthProxy struct { + ftpserver.FileTransfer + ctx context.Context + path string + length int64 + first512Bytes [512]byte + pFirst int + pipeWriter io.WriteCloser + errChan chan error +} + +func OpenUploadWithLength(ctx context.Context, path string, trunc bool, length int64) (*FileUploadWithLengthProxy, error) { + err := uploadAuth(ctx, path) + if err != nil { + return nil, err + } + if trunc { + _ = fs.Remove(ctx, path) + } + return &FileUploadWithLengthProxy{ctx: ctx, path: path, length: length}, 
nil +} + +func (f *FileUploadWithLengthProxy) Read(p []byte) (n int, err error) { + return 0, errs.NotSupport +} + +func (f *FileUploadWithLengthProxy) write(p []byte) (n int, err error) { + if f.pipeWriter != nil { + select { + case e := <-f.errChan: + return 0, e + default: + return f.pipeWriter.Write(p) + } + } else if len(p) < 512-f.pFirst { + copy(f.first512Bytes[f.pFirst:], p) + f.pFirst += len(p) + return len(p), nil + } else { + copy(f.first512Bytes[f.pFirst:], p[:512-f.pFirst]) + contentType := http.DetectContentType(f.first512Bytes[:]) + dir, name := stdpath.Split(f.path) + reader, writer := io.Pipe() + f.errChan = make(chan error, 1) + s := &stream.FileStream{ + Obj: &model.Object{ + Name: name, + Size: f.length, + Modified: time.Now(), + }, + Mimetype: contentType, + WebPutAsTask: false, + Reader: reader, + } + go func() { + e := fs.PutDirectly(f.ctx, dir, s, true) + f.errChan <- e + close(f.errChan) + }() + f.pipeWriter = writer + n, err = writer.Write(f.first512Bytes[:]) + if err != nil { + return n, err + } + n1, err := writer.Write(p[512-f.pFirst:]) + if err != nil { + return n1 + 512 - f.pFirst, err + } + f.pFirst = 512 + return len(p), nil + } +} + +func (f *FileUploadWithLengthProxy) Write(p []byte) (n int, err error) { + n, err = f.write(p) + if err != nil { + return + } + err = stream.ClientUploadLimit.WaitN(f.ctx, n) + return +} + +func (f *FileUploadWithLengthProxy) Seek(offset int64, whence int) (int64, error) { + return 0, errs.NotSupport +} + +func (f *FileUploadWithLengthProxy) Close() error { + if f.pipeWriter != nil { + err := f.pipeWriter.Close() + if err != nil { + return err + } + err = <-f.errChan + return err + } else { + data := f.first512Bytes[:f.pFirst] + contentType := http.DetectContentType(data) + dir, name := stdpath.Split(f.path) + s := &stream.FileStream{ + Obj: &model.Object{ + Name: name, + Size: int64(f.pFirst), + Modified: time.Now(), + }, + Mimetype: contentType, + WebPutAsTask: false, + Reader: bytes.NewReader(data), + } + return fs.PutDirectly(f.ctx, dir, s, true) + } +} diff --git a/server/ftp/site.go b/server/ftp/site.go new file mode 100644 index 00000000..8ea667d8 --- /dev/null +++ b/server/ftp/site.go @@ -0,0 +1,21 @@ +package ftp + +import ( + "fmt" + ftpserver "github.com/KirCute/ftpserverlib-pasvportmap" + "strconv" +) + +func HandleSIZE(param string, client ftpserver.ClientDriver) (int, string) { + fs, ok := client.(*AferoAdapter) + if !ok { + return ftpserver.StatusNotLoggedIn, "Unexpected exception (driver is nil)" + } + size, err := strconv.ParseInt(param, 10, 64) + if err != nil { + return ftpserver.StatusSyntaxErrorParameters, fmt.Sprintf( + "Couldn't parse file size, given: %s, err: %v", param, err) + } + fs.SetNextFileSize(size) + return ftpserver.StatusOK, "Accepted next file size" +} diff --git a/server/handles/archive.go b/server/handles/archive.go new file mode 100644 index 00000000..550bc3ce --- /dev/null +++ b/server/handles/archive.go @@ -0,0 +1,409 @@ +package handles + +import ( + "encoding/json" + "fmt" + "github.com/alist-org/alist/v3/internal/task" + "net/url" + stdpath "path" + + "github.com/alist-org/alist/v3/internal/archive/tool" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/fs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/internal/sign" + "github.com/alist-org/alist/v3/pkg/utils" + 
"github.com/alist-org/alist/v3/server/common" + "github.com/gin-gonic/gin" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +type ArchiveMetaReq struct { + Path string `json:"path" form:"path"` + Password string `json:"password" form:"password"` + Refresh bool `json:"refresh" form:"refresh"` + ArchivePass string `json:"archive_pass" form:"archive_pass"` +} + +type ArchiveMetaResp struct { + Comment string `json:"comment"` + IsEncrypted bool `json:"encrypted"` + Content []ArchiveContentResp `json:"content"` + Sort *model.Sort `json:"sort,omitempty"` + RawURL string `json:"raw_url"` + Sign string `json:"sign"` +} + +type ArchiveContentResp struct { + ObjResp + Children []ArchiveContentResp `json:"children"` +} + +func toObjsRespWithoutSignAndThumb(obj model.Obj) ObjResp { + return ObjResp{ + Name: obj.GetName(), + Size: obj.GetSize(), + IsDir: obj.IsDir(), + Modified: obj.ModTime(), + Created: obj.CreateTime(), + HashInfoStr: obj.GetHash().String(), + HashInfo: obj.GetHash().Export(), + Sign: "", + Thumb: "", + Type: utils.GetObjType(obj.GetName(), obj.IsDir()), + } +} + +func toContentResp(objs []model.ObjTree) []ArchiveContentResp { + if objs == nil { + return nil + } + ret, _ := utils.SliceConvert(objs, func(src model.ObjTree) (ArchiveContentResp, error) { + return ArchiveContentResp{ + ObjResp: toObjsRespWithoutSignAndThumb(src), + Children: toContentResp(src.GetChildren()), + }, nil + }) + return ret +} + +func FsArchiveMeta(c *gin.Context) { + var req ArchiveMetaReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + user := c.MustGet("user").(*model.User) + if !user.CanReadArchives() { + common.ErrorResp(c, errs.PermissionDenied, 403) + return + } + reqPath, err := user.JoinPath(req.Path) + if err != nil { + common.ErrorResp(c, err, 403) + return + } + meta, err := op.GetNearestMeta(reqPath) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + common.ErrorResp(c, err, 500, true) + return + } + } + c.Set("meta", meta) + if !common.CanAccess(user, meta, reqPath, req.Password) { + common.ErrorStrResp(c, "password is incorrect or you have no permission", 403) + return + } + archiveArgs := model.ArchiveArgs{ + LinkArgs: model.LinkArgs{ + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + }, + Password: req.ArchivePass, + } + ret, err := fs.ArchiveMeta(c, reqPath, model.ArchiveMetaArgs{ + ArchiveArgs: archiveArgs, + Refresh: req.Refresh, + }) + if err != nil { + if errors.Is(err, errs.WrongArchivePassword) { + common.ErrorResp(c, err, 202) + } else { + common.ErrorResp(c, err, 500) + } + return + } + s := "" + if isEncrypt(meta, reqPath) || setting.GetBool(conf.SignAll) { + s = sign.SignArchive(reqPath) + } + api := "/ae" + if ret.DriverProviding { + api = "/ad" + } + common.SuccessResp(c, ArchiveMetaResp{ + Comment: ret.GetComment(), + IsEncrypted: ret.IsEncrypted(), + Content: toContentResp(ret.GetTree()), + Sort: ret.Sort, + RawURL: fmt.Sprintf("%s%s%s", common.GetApiUrl(c.Request), api, utils.EncodePath(reqPath, true)), + Sign: s, + }) +} + +type ArchiveListReq struct { + ArchiveMetaReq + model.PageReq + InnerPath string `json:"inner_path" form:"inner_path"` +} + +type ArchiveListResp struct { + Content []ObjResp `json:"content"` + Total int64 `json:"total"` +} + +func FsArchiveList(c *gin.Context) { + var req ArchiveListReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + req.Validate() + user := c.MustGet("user").(*model.User) + if 
!user.CanReadArchives() { + common.ErrorResp(c, errs.PermissionDenied, 403) + return + } + reqPath, err := user.JoinPath(req.Path) + if err != nil { + common.ErrorResp(c, err, 403) + return + } + meta, err := op.GetNearestMeta(reqPath) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + common.ErrorResp(c, err, 500, true) + return + } + } + c.Set("meta", meta) + if !common.CanAccess(user, meta, reqPath, req.Password) { + common.ErrorStrResp(c, "password is incorrect or you have no permission", 403) + return + } + objs, err := fs.ArchiveList(c, reqPath, model.ArchiveListArgs{ + ArchiveInnerArgs: model.ArchiveInnerArgs{ + ArchiveArgs: model.ArchiveArgs{ + LinkArgs: model.LinkArgs{ + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + }, + Password: req.ArchivePass, + }, + InnerPath: utils.FixAndCleanPath(req.InnerPath), + }, + Refresh: req.Refresh, + }) + if err != nil { + if errors.Is(err, errs.WrongArchivePassword) { + common.ErrorResp(c, err, 202) + } else { + common.ErrorResp(c, err, 500) + } + return + } + total, objs := pagination(objs, &req.PageReq) + ret, _ := utils.SliceConvert(objs, func(src model.Obj) (ObjResp, error) { + return toObjsRespWithoutSignAndThumb(src), nil + }) + common.SuccessResp(c, ArchiveListResp{ + Content: ret, + Total: int64(total), + }) +} + +type StringOrArray []string + +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var value string + if err := json.Unmarshal(data, &value); err == nil { + *s = []string{value} + return nil + } + var sliceValue []string + if err := json.Unmarshal(data, &sliceValue); err != nil { + return err + } + *s = sliceValue + return nil +} + +type ArchiveDecompressReq struct { + SrcDir string `json:"src_dir" form:"src_dir"` + DstDir string `json:"dst_dir" form:"dst_dir"` + Name StringOrArray `json:"name" form:"name"` + ArchivePass string `json:"archive_pass" form:"archive_pass"` + InnerPath string `json:"inner_path" form:"inner_path"` + CacheFull bool `json:"cache_full" form:"cache_full"` + PutIntoNewDir bool `json:"put_into_new_dir" form:"put_into_new_dir"` +} + +func FsArchiveDecompress(c *gin.Context) { + var req ArchiveDecompressReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + user := c.MustGet("user").(*model.User) + if !user.CanDecompress() { + common.ErrorResp(c, errs.PermissionDenied, 403) + return + } + srcPaths := make([]string, 0, len(req.Name)) + for _, name := range req.Name { + srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, name)) + if err != nil { + common.ErrorResp(c, err, 403) + return + } + srcPaths = append(srcPaths, srcPath) + } + dstDir, err := user.JoinPath(req.DstDir) + if err != nil { + common.ErrorResp(c, err, 403) + return + } + tasks := make([]task.TaskExtensionInfo, 0, len(srcPaths)) + for _, srcPath := range srcPaths { + t, e := fs.ArchiveDecompress(c, srcPath, dstDir, model.ArchiveDecompressArgs{ + ArchiveInnerArgs: model.ArchiveInnerArgs{ + ArchiveArgs: model.ArchiveArgs{ + LinkArgs: model.LinkArgs{ + Header: c.Request.Header, + Type: c.Query("type"), + HttpReq: c.Request, + }, + Password: req.ArchivePass, + }, + InnerPath: utils.FixAndCleanPath(req.InnerPath), + }, + CacheFull: req.CacheFull, + PutIntoNewDir: req.PutIntoNewDir, + }) + if e != nil { + if errors.Is(e, errs.WrongArchivePassword) { + common.ErrorResp(c, e, 202) + } else { + common.ErrorResp(c, e, 500) + } + return + } + if t != nil { + tasks = append(tasks, t) + } + } + common.SuccessResp(c, gin.H{ + "task": getTaskInfos(tasks), 
+	})
+}
+
+func ArchiveDown(c *gin.Context) {
+	archiveRawPath := c.MustGet("path").(string)
+	innerPath := utils.FixAndCleanPath(c.Query("inner"))
+	password := c.Query("pass")
+	filename := stdpath.Base(innerPath)
+	storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{})
+	if err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	if common.ShouldProxy(storage, filename) {
+		ArchiveProxy(c)
+		return
+	} else {
+		link, _, err := fs.ArchiveDriverExtract(c, archiveRawPath, model.ArchiveInnerArgs{
+			ArchiveArgs: model.ArchiveArgs{
+				LinkArgs: model.LinkArgs{
+					IP:       c.ClientIP(),
+					Header:   c.Request.Header,
+					Type:     c.Query("type"),
+					HttpReq:  c.Request,
+					Redirect: true,
+				},
+				Password: password,
+			},
+			InnerPath: innerPath,
+		})
+		if err != nil {
+			common.ErrorResp(c, err, 500)
+			return
+		}
+		down(c, link)
+	}
+}
+
+func ArchiveProxy(c *gin.Context) {
+	archiveRawPath := c.MustGet("path").(string)
+	innerPath := utils.FixAndCleanPath(c.Query("inner"))
+	password := c.Query("pass")
+	filename := stdpath.Base(innerPath)
+	storage, err := fs.GetStorage(archiveRawPath, &fs.GetStoragesArgs{})
+	if err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	if canProxy(storage, filename) {
+		// TODO: Support external download proxy URL
+		link, file, err := fs.ArchiveDriverExtract(c, archiveRawPath, model.ArchiveInnerArgs{
+			ArchiveArgs: model.ArchiveArgs{
+				LinkArgs: model.LinkArgs{
+					Header:  c.Request.Header,
+					Type:    c.Query("type"),
+					HttpReq: c.Request,
+				},
+				Password: password,
+			},
+			InnerPath: innerPath,
+		})
+		if err != nil {
+			common.ErrorResp(c, err, 500)
+			return
+		}
+		localProxy(c, link, file, storage.GetStorage().ProxyRange)
+	} else {
+		common.ErrorStrResp(c, "proxy not allowed", 403)
+		return
+	}
+}
+
+func ArchiveInternalExtract(c *gin.Context) {
+	archiveRawPath := c.MustGet("path").(string)
+	innerPath := utils.FixAndCleanPath(c.Query("inner"))
+	password := c.Query("pass")
+	rc, size, err := fs.ArchiveInternalExtract(c, archiveRawPath, model.ArchiveInnerArgs{
+		ArchiveArgs: model.ArchiveArgs{
+			LinkArgs: model.LinkArgs{
+				Header:  c.Request.Header,
+				Type:    c.Query("type"),
+				HttpReq: c.Request,
+			},
+			Password: password,
+		},
+		InnerPath: innerPath,
+	})
+	if err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	defer func() {
+		if err := rc.Close(); err != nil {
+			log.Errorf("failed to close file streamer, %v", err)
+		}
+	}()
+	headers := map[string]string{
+		"Referrer-Policy": "no-referrer",
+		"Cache-Control":   "max-age=0, no-cache, no-store, must-revalidate",
+	}
+	filename := stdpath.Base(innerPath)
+	headers["Content-Disposition"] = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, filename, url.PathEscape(filename))
+	contentType := c.Request.Header.Get("Content-Type")
+	if contentType == "" {
+		contentType = utils.GetMimeType(filename)
+	}
+	c.DataFromReader(200, size, contentType, rc, headers)
+}
+
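+// ArchiveExtensions lists the archive formats the built-in extraction tools
+// understand (the keys of tool.Tools).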
+func ArchiveExtensions(c *gin.Context) {
+	var ext []string
+	for key := range tool.Tools {
+		ext = append(ext, key)
+	}
+	common.SuccessResp(c, ext)
+}
diff --git a/server/handles/auth.go b/server/handles/auth.go
index e1f512c4..7a2c0fb5 100644
--- a/server/handles/auth.go
+++ b/server/handles/auth.go
@@ -113,6 +113,10 @@ func UpdateCurrent(c *gin.Context) {
 		return
 	}
 	user := c.MustGet("user").(*model.User)
+	if user.IsGuest() {
+		common.ErrorStrResp(c, "Guest user cannot update profile", 403)
+		return
+	}
 	user.Username = req.Username
 	if req.Password != "" {
 		user.SetPassword(req.Password)
diff --git a/server/handles/const.go b/server/handles/const.go
new file mode 100644
index 00000000..b108c9da
--- /dev/null
+++ b/server/handles/const.go
@@ -0,0 +1,7 @@
+package handles
+
+const (
+	CANCEL    = "cancel"
+	OVERWRITE = "overwrite"
+	SKIP      = "skip"
+)
diff --git a/server/handles/down.go b/server/handles/down.go
index 0020ed14..2c5c2faf 100644
--- a/server/handles/down.go
+++ b/server/handles/down.go
@@ -1,9 +1,11 @@
 package handles
 
 import (
+	"bytes"
 	"fmt"
 	"io"
 	stdpath "path"
+	"strconv"
 	"strings"
 
 	"github.com/alist-org/alist/v3/internal/conf"
@@ -15,7 +17,9 @@ import (
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/server/common"
 	"github.com/gin-gonic/gin"
+	"github.com/microcosm-cc/bluemonday"
 	log "github.com/sirupsen/logrus"
+	"github.com/yuin/goldmark"
 )
 
 func Down(c *gin.Context) {
@@ -31,37 +35,17 @@ func Down(c *gin.Context) {
 		return
 	} else {
 		link, _, err := fs.Link(c, rawPath, model.LinkArgs{
-			IP:      c.ClientIP(),
-			Header:  c.Request.Header,
-			Type:    c.Query("type"),
-			HttpReq: c.Request,
+			IP:       c.ClientIP(),
+			Header:   c.Request.Header,
+			Type:     c.Query("type"),
+			HttpReq:  c.Request,
+			Redirect: true,
 		})
 		if err != nil {
 			common.ErrorResp(c, err, 500)
 			return
 		}
-		if link.MFile != nil {
-			defer func(ReadSeekCloser io.ReadCloser) {
-				err := ReadSeekCloser.Close()
-				if err != nil {
-					log.Errorf("close data error: %s", err)
-				}
-			}(link.MFile)
-		}
-		c.Header("Referrer-Policy", "no-referrer")
-		c.Header("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate")
-		if setting.GetBool(conf.ForwardDirectLinkParams) {
-			query := c.Request.URL.Query()
-			for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] {
-				query.Del(v)
-			}
-			link.URL, err = utils.InjectQuery(link.URL, query)
-			if err != nil {
-				common.ErrorResp(c, err, 500)
-				return
-			}
-		}
-		c.Redirect(302, link.URL)
+		down(c, link)
 	}
 }
 
@@ -95,31 +79,94 @@ func Proxy(c *gin.Context) {
 		common.ErrorResp(c, err, 500)
 		return
 	}
-		if link.URL != "" && setting.GetBool(conf.ForwardDirectLinkParams) {
-			query := c.Request.URL.Query()
-			for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] {
-				query.Del(v)
-			}
-			link.URL, err = utils.InjectQuery(link.URL, query)
-			if err != nil {
-				common.ErrorResp(c, err, 500)
-				return
-			}
-		}
-		if storage.GetStorage().ProxyRange {
-			common.ProxyRange(link, file.GetSize())
-		}
-		err = common.Proxy(c.Writer, c.Request, link, file)
-		if err != nil {
-			common.ErrorResp(c, err, 500, true)
-			return
-		}
+		localProxy(c, link, file, storage.GetStorage().ProxyRange)
 	} else {
 		common.ErrorStrResp(c, "proxy not allowed", 403)
 		return
 	}
 }
 
+func down(c *gin.Context, link *model.Link) {
+	var err error
+	if link.MFile != nil {
+		defer func(ReadSeekCloser io.ReadCloser) {
+			err := ReadSeekCloser.Close()
+			if err != nil {
+				log.Errorf("close data error: %s", err)
+			}
+		}(link.MFile)
+	}
+	c.Header("Referrer-Policy", "no-referrer")
+	c.Header("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate")
+	if setting.GetBool(conf.ForwardDirectLinkParams) {
+		query := c.Request.URL.Query()
+		for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] {
+			query.Del(v)
+		}
+		link.URL, err = utils.InjectQuery(link.URL, query)
+		if err != nil {
+			common.ErrorResp(c, err, 500)
+			return
+		}
+	}
+	c.Redirect(302, link.URL)
+}
+
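+// localProxy streams the object through alist itself instead of redirecting,
+// applying Range slicing when proxyRange is enabled.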
+func localProxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
+	var err error
+	if link.URL != "" && setting.GetBool(conf.ForwardDirectLinkParams) {
+		query := c.Request.URL.Query()
+		for _, v := range conf.SlicesMap[conf.IgnoreDirectLinkParams] {
+			query.Del(v)
+		}
+		link.URL, err = utils.InjectQuery(link.URL, query)
+		if err != nil {
+			common.ErrorResp(c, err, 500)
+			return
+		}
+	}
+	if proxyRange {
+		common.ProxyRange(link, file.GetSize())
+	}
+	Writer := &common.WrittenResponseWriter{ResponseWriter: c.Writer}
+
+	// handle markdown files first
+	if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) {
+		buf := bytes.NewBuffer(make([]byte, 0, file.GetSize()))
+		w := &common.InterceptResponseWriter{ResponseWriter: Writer, Writer: buf}
+		err = common.Proxy(w, c.Request, link, file)
+		if err == nil && buf.Len() > 0 {
+			if c.Writer.Status() < 200 || c.Writer.Status() > 300 {
+				c.Writer.Write(buf.Bytes())
+				return
+			}
+
+			var html bytes.Buffer
+			if err = goldmark.Convert(buf.Bytes(), &html); err != nil {
+				err = fmt.Errorf("markdown conversion failed: %w", err)
+			} else {
+				buf.Reset()
+				err = bluemonday.UGCPolicy().SanitizeReaderToWriter(&html, buf)
+				if err == nil {
+					Writer.Header().Set("Content-Length", strconv.FormatInt(int64(buf.Len()), 10))
+					Writer.Header().Set("Content-Type", "text/html; charset=utf-8")
+					_, err = utils.CopyWithBuffer(Writer, buf)
+				}
+			}
+		}
+	} else {
+		err = common.Proxy(Writer, c.Request, link, file)
+	}
+	if err == nil {
+		return
+	}
+	if Writer.IsWritten() {
+		log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err)
+	} else {
+		common.ErrorResp(c, err, 500, true)
+	}
+}
+
 // TODO need optimize
 // when can be proxy?
 // 1. text file
diff --git a/server/handles/fsbatch.go b/server/handles/fsbatch.go
index fa7971df..3841bff5 100644
--- a/server/handles/fsbatch.go
+++ b/server/handles/fsbatch.go
@@ -3,6 +3,7 @@ package handles
 import (
 	"fmt"
 	"regexp"
+	"slices"
 
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/fs"
@@ -14,6 +15,125 @@ import (
 	"github.com/pkg/errors"
 )
 
+type RecursiveMoveReq struct {
+	SrcDir         string `json:"src_dir"`
+	DstDir         string `json:"dst_dir"`
+	ConflictPolicy string `json:"conflict_policy"`
+}
+
+func FsRecursiveMove(c *gin.Context) {
+	var req RecursiveMoveReq
+	if err := c.ShouldBind(&req); err != nil {
+		common.ErrorResp(c, err, 400)
+		return
+	}
+
+	user := c.MustGet("user").(*model.User)
+	if !user.CanMove() {
+		common.ErrorResp(c, errs.PermissionDenied, 403)
+		return
+	}
+	srcDir, err := user.JoinPath(req.SrcDir)
+	if err != nil {
+		common.ErrorResp(c, err, 403)
+		return
+	}
+	dstDir, err := user.JoinPath(req.DstDir)
+	if err != nil {
+		common.ErrorResp(c, err, 403)
+		return
+	}
+
+	meta, err := op.GetNearestMeta(srcDir)
+	if err != nil {
+		if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
+			common.ErrorResp(c, err, 500, true)
+			return
+		}
+	}
+	c.Set("meta", meta)
+
+	rootFiles, err := fs.List(c, srcDir, &fs.ListArgs{})
+	if err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+
+	var existingFileNames []string
+	if req.ConflictPolicy != OVERWRITE {
+		dstFiles, err := fs.List(c, dstDir, &fs.ListArgs{})
+		if err != nil {
+			common.ErrorResp(c, err, 500)
+			return
+		}
+		existingFileNames = make([]string, 0, len(dstFiles))
+		for _, dstFile := range dstFiles {
+			existingFileNames = append(existingFileNames, dstFile.GetName())
+		}
+	}
+
+	// record the file path
+	filePathMap := make(map[model.Obj]string)
+	movingFiles := generic.NewQueue[model.Obj]()
+	movingFileNames := make([]string, 0, len(rootFiles))
+	for _, file := range rootFiles {
+		movingFiles.Push(file)
+		filePathMap[file] = srcDir
+	}
+
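+	// scan first, move later: every conflict is resolved while draining the
+	// queue, so a CANCEL policy aborts before any file has been moved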
fmt.Sprintf("%s/%s", movingFilePath, movingFile.GetName()) + if movingFile.IsDir() { + // directory, recursive move + subFilePath := movingFileName + subFiles, err := fs.List(c, movingFileName, &fs.ListArgs{Refresh: true}) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + for _, subFile := range subFiles { + movingFiles.Push(subFile) + filePathMap[subFile] = subFilePath + } + } else { + if movingFilePath == dstDir { + // same directory, don't move + continue + } + + if slices.Contains(existingFileNames, movingFile.GetName()) { + if req.ConflictPolicy == CANCEL { + common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", movingFile.GetName()), 403) + return + } else if req.ConflictPolicy == SKIP { + continue + } + } else if req.ConflictPolicy != OVERWRITE { + existingFileNames = append(existingFileNames, movingFile.GetName()) + } + movingFileNames = append(movingFileNames, movingFileName) + + } + + } + + var count = 0 + for i, fileName := range movingFileNames { + // move + err := fs.Move(c, fileName, dstDir, len(movingFileNames) > i+1) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + count++ + } + + common.SuccessWithMsgResp(c, fmt.Sprintf("Successfully moved %d %s", count, common.Pluralize(count, "file", "files"))) +} + type BatchRenameReq struct { SrcDir string `json:"src_dir"` RenameObjects []struct { @@ -61,94 +181,6 @@ func FsBatchRename(c *gin.Context) { common.SuccessResp(c) } -type RecursiveMoveReq struct { - SrcDir string `json:"src_dir"` - DstDir string `json:"dst_dir"` -} - -func FsRecursiveMove(c *gin.Context) { - var req RecursiveMoveReq - if err := c.ShouldBind(&req); err != nil { - common.ErrorResp(c, err, 400) - return - } - - user := c.MustGet("user").(*model.User) - if !user.CanMove() { - common.ErrorResp(c, errs.PermissionDenied, 403) - return - } - srcDir, err := user.JoinPath(req.SrcDir) - if err != nil { - common.ErrorResp(c, err, 403) - return - } - dstDir, err := user.JoinPath(req.DstDir) - if err != nil { - common.ErrorResp(c, err, 403) - return - } - - meta, err := op.GetNearestMeta(srcDir) - if err != nil { - if !errors.Is(errors.Cause(err), errs.MetaNotFound) { - common.ErrorResp(c, err, 500, true) - return - } - } - c.Set("meta", meta) - - rootFiles, err := fs.List(c, srcDir, &fs.ListArgs{}) - if err != nil { - common.ErrorResp(c, err, 500) - return - } - - // record the file path - filePathMap := make(map[model.Obj]string) - movingFiles := generic.NewQueue[model.Obj]() - for _, file := range rootFiles { - movingFiles.Push(file) - filePathMap[file] = srcDir - } - - for !movingFiles.IsEmpty() { - - movingFile := movingFiles.Pop() - movingFilePath := filePathMap[movingFile] - movingFileName := fmt.Sprintf("%s/%s", movingFilePath, movingFile.GetName()) - if movingFile.IsDir() { - // directory, recursive move - subFilePath := movingFileName - subFiles, err := fs.List(c, movingFileName, &fs.ListArgs{Refresh: true}) - if err != nil { - common.ErrorResp(c, err, 500) - return - } - for _, subFile := range subFiles { - movingFiles.Push(subFile) - filePathMap[subFile] = subFilePath - } - } else { - - if movingFilePath == dstDir { - // same directory, don't move - continue - } - - // move - err := fs.Move(c, movingFileName, dstDir, movingFiles.IsEmpty()) - if err != nil { - common.ErrorResp(c, err, 500) - return - } - } - - } - - common.SuccessResp(c) -} - type RegexRenameReq struct { SrcDir string `json:"src_dir"` SrcNameRegex string `json:"src_name_regex"` diff --git a/server/handles/fsmanage.go b/server/handles/fsmanage.go index 
+	if !req.Overwrite {
+		for _, name := range req.Names {
+			if res, _ := fs.Get(c, stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil {
+				common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403)
+				return
+			}
+		}
+	}
 	for i, name := range req.Names {
 		err := fs.Move(c, stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1)
 		if err != nil {
@@ -121,7 +130,15 @@ func FsCopy(c *gin.Context) {
 		common.ErrorResp(c, err, 403)
 		return
 	}
-	var addedTasks []tache.TaskWithInfo
+	if !req.Overwrite {
+		for _, name := range req.Names {
+			if res, _ := fs.Get(c, stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil {
+				common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403)
+				return
+			}
+		}
+	}
+	var addedTasks []task.TaskExtensionInfo
 	for i, name := range req.Names {
 		t, err := fs.Copy(c, stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1)
 		if t != nil {
@@ -138,8 +155,9 @@
 }
 
 type RenameReq struct {
-	Path string `json:"path"`
-	Name string `json:"name"`
+	Path      string `json:"path"`
+	Name      string `json:"name"`
+	Overwrite bool   `json:"overwrite"`
 }
 
 func FsRename(c *gin.Context) {
@@ -158,6 +176,15 @@ func FsRename(c *gin.Context) {
 		common.ErrorResp(c, err, 403)
 		return
 	}
+	if !req.Overwrite {
+		dstPath := stdpath.Join(stdpath.Dir(reqPath), req.Name)
+		if dstPath != reqPath {
+			if res, _ := fs.Get(c, dstPath, &fs.GetArgs{NoLog: true}); res != nil {
+				common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", req.Name), 403)
+				return
+			}
+		}
+	}
 	if err := fs.Rename(c, reqPath, req.Name); err != nil {
 		common.ErrorResp(c, err, 500)
 		return
diff --git a/server/handles/fsread.go b/server/handles/fsread.go
index 7c580f63..73bde23b 100644
--- a/server/handles/fsread.go
+++ b/server/handles/fsread.go
@@ -33,6 +33,8 @@ type DirReq struct {
 }
 
 type ObjResp struct {
+	Id    string `json:"id"`
+	Path  string `json:"path"`
 	Name  string `json:"name"`
 	Size  int64  `json:"size"`
 	IsDir bool   `json:"is_dir"`
@@ -210,6 +212,8 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp {
 	for _, obj := range objs {
 		thumb, _ := model.GetThumb(obj)
 		resp = append(resp, ObjResp{
+			Id:    obj.GetID(),
+			Path:  obj.GetPath(),
 			Name:  obj.GetName(),
 			Size:  obj.GetSize(),
 			IsDir: obj.IsDir(),
@@ -303,9 +307,10 @@ func FsGet(c *gin.Context) {
 	} else {
 		// if storage is not proxy, use raw url by fs.Link
 		link, _, err := fs.Link(c, reqPath, model.LinkArgs{
-			IP:      c.ClientIP(),
-			Header:  c.Request.Header,
-			HttpReq: c.Request,
+			IP:       c.ClientIP(),
+			Header:   c.Request.Header,
+			HttpReq:  c.Request,
+			Redirect: true,
 		})
 		if err != nil {
 			common.ErrorResp(c, err, 500)
@@ -325,6 +330,8 @@ func FsGet(c *gin.Context) {
 	thumb, _ := model.GetThumb(obj)
 	common.SuccessResp(c, FsGetResp{
 		ObjResp: ObjResp{
+			Id:    obj.GetID(),
+			Path:  obj.GetPath(),
 			Name:  obj.GetName(),
 			Size:  obj.GetSize(),
 			IsDir: obj.IsDir(),
diff --git a/server/handles/fsup.go b/server/handles/fsup.go
index ef9baa11..41344fb8 100644
--- a/server/handles/fsup.go
+++ b/server/handles/fsup.go
@@ -1,17 +1,17 @@
 package handles
 
 import (
-	"github.com/xhofe/tache"
 	"io"
 	"net/url"
 	stdpath "path"
 	"strconv"
 	"time"
 
-	"github.com/alist-org/alist/v3/internal/stream"
-
 	"github.com/alist-org/alist/v3/internal/fs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"github.com/alist-org/alist/v3/internal/task"
+
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/server/common"
 	"github.com/gin-gonic/gin"
 )
@@ -35,12 +35,20 @@ func FsStream(c *gin.Context) {
 		return
 	}
 	asTask := c.GetHeader("As-Task") == "true"
+	overwrite := c.GetHeader("Overwrite") != "false"
 	user := c.MustGet("user").(*model.User)
 	path, err = user.JoinPath(path)
 	if err != nil {
 		common.ErrorResp(c, err, 403)
 		return
 	}
+	if !overwrite {
+		if res, _ := fs.Get(c, path, &fs.GetArgs{NoLog: true}); res != nil {
+			_, _ = utils.CopyWithBuffer(io.Discard, c.Request.Body)
+			common.ErrorStrResp(c, "file exists", 403)
+			return
+		}
+	}
 	dir, name := stdpath.Split(path)
 	sizeStr := c.GetHeader("Content-Length")
 	size, err := strconv.ParseInt(sizeStr, 10, 64)
@@ -48,19 +56,34 @@ func FsStream(c *gin.Context) {
 		common.ErrorResp(c, err, 400)
 		return
 	}
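+	// optional client-declared checksums (X-File-Md5/Sha1/Sha256) are attached
+	// to the upload stream as HashInfo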
+	h := make(map[*utils.HashType]string)
+	if md5 := c.GetHeader("X-File-Md5"); md5 != "" {
+		h[utils.MD5] = md5
+	}
+	if sha1 := c.GetHeader("X-File-Sha1"); sha1 != "" {
+		h[utils.SHA1] = sha1
+	}
+	if sha256 := c.GetHeader("X-File-Sha256"); sha256 != "" {
+		h[utils.SHA256] = sha256
+	}
+	mimetype := c.GetHeader("Content-Type")
+	if len(mimetype) == 0 {
+		mimetype = utils.GetMimeType(name)
+	}
 	s := &stream.FileStream{
 		Obj: &model.Object{
 			Name:     name,
 			Size:     size,
 			Modified: getLastModified(c),
+			HashInfo: utils.NewHashInfoByMap(h),
 		},
 		Reader:       c.Request.Body,
-		Mimetype:     c.GetHeader("Content-Type"),
+		Mimetype:     mimetype,
 		WebPutAsTask: asTask,
 	}
-	var t tache.TaskWithInfo
+	var t task.TaskExtensionInfo
 	if asTask {
-		t, err = fs.PutAsTask(dir, s)
+		t, err = fs.PutAsTask(c, dir, s)
 	} else {
 		err = fs.PutDirectly(c, dir, s, true)
 	}
@@ -70,6 +93,9 @@ func FsStream(c *gin.Context) {
 		return
 	}
 	if t == nil {
+		if n, _ := io.ReadFull(c.Request.Body, []byte{0}); n == 1 {
+			_, _ = utils.CopyWithBuffer(io.Discard, c.Request.Body)
+		}
 		common.SuccessResp(c)
 		return
 	}
@@ -86,12 +112,20 @@ func FsForm(c *gin.Context) {
 		return
 	}
 	asTask := c.GetHeader("As-Task") == "true"
+	overwrite := c.GetHeader("Overwrite") != "false"
 	user := c.MustGet("user").(*model.User)
 	path, err = user.JoinPath(path)
 	if err != nil {
 		common.ErrorResp(c, err, 403)
 		return
 	}
+	if !overwrite {
+		if res, _ := fs.Get(c, path, &fs.GetArgs{NoLog: true}); res != nil {
+			_, _ = utils.CopyWithBuffer(io.Discard, c.Request.Body)
+			common.ErrorStrResp(c, "file exists", 403)
+			return
+		}
+	}
 	storage, err := fs.GetStorage(path, &fs.GetStoragesArgs{})
 	if err != nil {
 		common.ErrorResp(c, err, 400)
@@ -113,29 +147,39 @@ func FsForm(c *gin.Context) {
 	}
 	defer f.Close()
 	dir, name := stdpath.Split(path)
+	h := make(map[*utils.HashType]string)
+	if md5 := c.GetHeader("X-File-Md5"); md5 != "" {
+		h[utils.MD5] = md5
+	}
+	if sha1 := c.GetHeader("X-File-Sha1"); sha1 != "" {
+		h[utils.SHA1] = sha1
+	}
+	if sha256 := c.GetHeader("X-File-Sha256"); sha256 != "" {
+		h[utils.SHA256] = sha256
+	}
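+	// fall back to guessing the MIME type from the file name when the form
+	// part does not declare one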
+	mimetype := file.Header.Get("Content-Type")
+	if len(mimetype) == 0 {
+		mimetype = utils.GetMimeType(name)
+	}
 	s := stream.FileStream{
 		Obj: &model.Object{
 			Name:     name,
 			Size:     file.Size,
 			Modified: getLastModified(c),
+			HashInfo: utils.NewHashInfoByMap(h),
 		},
 		Reader:       f,
-		Mimetype:     file.Header.Get("Content-Type"),
+		Mimetype:     mimetype,
 		WebPutAsTask: asTask,
 	}
-	var t tache.TaskWithInfo
+	var t task.TaskExtensionInfo
 	if asTask {
 		s.Reader = struct {
 			io.Reader
 		}{f}
-		t, err = fs.PutAsTask(dir, &s)
+		t, err = fs.PutAsTask(c, dir, &s)
 	} else {
-		ss, err := stream.NewSeekableStream(s, nil)
-		if err != nil {
-			common.ErrorResp(c, err, 500)
-			return
-		}
-		err = fs.PutDirectly(c, dir, ss, true)
+		err = fs.PutDirectly(c, dir, &s, true)
 	}
 	if err != nil {
 		common.ErrorResp(c, err, 500)
diff --git a/server/handles/offline_download.go b/server/handles/offline_download.go
index 0b019e9e..24ff7a05 100644
--- a/server/handles/offline_download.go
+++ b/server/handles/offline_download.go
@@ -1,13 +1,16 @@
 package handles
 
 import (
+	_115 "github.com/alist-org/alist/v3/drivers/115"
+	"github.com/alist-org/alist/v3/drivers/pikpak"
+	"github.com/alist-org/alist/v3/drivers/thunder"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/offline_download/tool"
 	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/task"
 	"github.com/alist-org/alist/v3/server/common"
 	"github.com/gin-gonic/gin"
-	"github.com/xhofe/tache"
 )
 
 type SetAria2Req struct {
@@ -30,6 +33,10 @@ func SetAria2(c *gin.Context) {
 		return
 	}
 	_tool, err := tool.Tools.Get("aria2")
+	if err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
 	version, err := _tool.Init()
 	if err != nil {
 		common.ErrorResp(c, err, 500)
@@ -69,6 +76,169 @@ func SetQbittorrent(c *gin.Context) {
 	common.SuccessResp(c, "ok")
 }
 
+type SetTransmissionReq struct {
+	Uri      string `json:"uri" form:"uri"`
+	Seedtime string `json:"seedtime" form:"seedtime"`
+}
+
+func SetTransmission(c *gin.Context) {
+	var req SetTransmissionReq
+	if err := c.ShouldBind(&req); err != nil {
+		common.ErrorResp(c, err, 400)
+		return
+	}
+	items := []model.SettingItem{
+		{Key: conf.TransmissionUri, Value: req.Uri, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+		{Key: conf.TransmissionSeedtime, Value: req.Seedtime, Type: conf.TypeNumber, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+	}
+	if err := op.SaveSettingItems(items); err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	_tool, err := tool.Tools.Get("Transmission")
+	if err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	if _, err := _tool.Init(); err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	common.SuccessResp(c, "ok")
+}
+
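+// Set115 verifies that the configured temp dir resolves to a mounted, working
+// 115 Cloud storage before saving the setting.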
+type Set115Req struct {
+	TempDir string `json:"temp_dir" form:"temp_dir"`
+}
+
+func Set115(c *gin.Context) {
+	var req Set115Req
+	if err := c.ShouldBind(&req); err != nil {
+		common.ErrorResp(c, err, 400)
+		return
+	}
+	if req.TempDir != "" {
+		storage, _, err := op.GetStorageAndActualPath(req.TempDir)
+		if err != nil {
+			common.ErrorStrResp(c, "storage does not exist", 400)
+			return
+		}
+		if storage.Config().CheckStatus && storage.GetStorage().Status != op.WORK {
+			common.ErrorStrResp(c, "storage not init: "+storage.GetStorage().Status, 400)
+			return
+		}
+		if _, ok := storage.(*_115.Pan115); !ok {
+			common.ErrorStrResp(c, "unsupported storage driver for offline download, only 115 Cloud is supported", 400)
+			return
+		}
+	}
+	items := []model.SettingItem{
+		{Key: conf.Pan115TempDir, Value: req.TempDir, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+	}
+	if err := op.SaveSettingItems(items); err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	_tool, err := tool.Tools.Get("115 Cloud")
+	if err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	if _, err := _tool.Init(); err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	common.SuccessResp(c, "ok")
+}
+
+type SetPikPakReq struct {
+	TempDir string `json:"temp_dir" form:"temp_dir"`
+}
+
+func SetPikPak(c *gin.Context) {
+	var req SetPikPakReq
+	if err := c.ShouldBind(&req); err != nil {
+		common.ErrorResp(c, err, 400)
+		return
+	}
+	if req.TempDir != "" {
+		storage, _, err := op.GetStorageAndActualPath(req.TempDir)
+		if err != nil {
+			common.ErrorStrResp(c, "storage does not exist", 400)
+			return
+		}
+		if storage.Config().CheckStatus && storage.GetStorage().Status != op.WORK {
+			common.ErrorStrResp(c, "storage not init: "+storage.GetStorage().Status, 400)
+			return
+		}
+		if _, ok := storage.(*pikpak.PikPak); !ok {
+			common.ErrorStrResp(c, "unsupported storage driver for offline download, only PikPak is supported", 400)
+			return
+		}
+	}
+	items := []model.SettingItem{
+		{Key: conf.PikPakTempDir, Value: req.TempDir, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+	}
+	if err := op.SaveSettingItems(items); err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	_tool, err := tool.Tools.Get("PikPak")
+	if err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	if _, err := _tool.Init(); err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	common.SuccessResp(c, "ok")
+}
+
+type SetThunderReq struct {
+	TempDir string `json:"temp_dir" form:"temp_dir"`
+}
+
+func SetThunder(c *gin.Context) {
+	var req SetThunderReq
+	if err := c.ShouldBind(&req); err != nil {
+		common.ErrorResp(c, err, 400)
+		return
+	}
+	if req.TempDir != "" {
+		storage, _, err := op.GetStorageAndActualPath(req.TempDir)
+		if err != nil {
+			common.ErrorStrResp(c, "storage does not exist", 400)
+			return
+		}
+		if storage.Config().CheckStatus && storage.GetStorage().Status != op.WORK {
+			common.ErrorStrResp(c, "storage not init: "+storage.GetStorage().Status, 400)
+			return
+		}
+		if _, ok := storage.(*thunder.Thunder); !ok {
+			common.ErrorStrResp(c, "unsupported storage driver for offline download, only Thunder is supported", 400)
+			return
+		}
+	}
+	items := []model.SettingItem{
+		{Key: conf.ThunderTempDir, Value: req.TempDir, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
+	}
+	if err := op.SaveSettingItems(items); err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	_tool, err := tool.Tools.Get("Thunder")
+	if err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	if _, err := _tool.Init(); err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	common.SuccessResp(c, "ok")
+}
+
 func OfflineDownloadTools(c *gin.Context) {
 	tools := tool.Tools.Names()
 	common.SuccessResp(c, tools)
@@ -98,7 +268,7 @@ func AddOfflineDownload(c *gin.Context) {
 		common.ErrorResp(c, err, 403)
 		return
 	}
-	var tasks []tache.TaskWithInfo
+	var tasks []task.TaskExtensionInfo
 	for _, url := range req.Urls {
 		t, err := tool.AddURL(c, &tool.AddURLArgs{
 			URL: url,
@@ -110,7 +280,9 @@ func AddOfflineDownload(c *gin.Context) {
 			common.ErrorResp(c, err, 500)
 			return
 		}
-		tasks = append(tasks, t)
+		if t != nil {
+			tasks = append(tasks, t)
+		}
 	}
 	common.SuccessResp(c, gin.H{
 		"tasks": getTaskInfos(tasks),
diff --git a/server/handles/sshkey.go b/server/handles/sshkey.go
new file mode 100644
index 00000000..6f8d46b4
--- /dev/null
+++ b/server/handles/sshkey.go
@@ -0,0 +1,125 @@
+package handles
+
+import (
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/server/common"
+	"github.com/gin-gonic/gin"
+	"strconv"
+	"strings"
+)
+
+type SSHKeyAddReq struct {
+	Title string `json:"title" binding:"required"`
+	Key   string `json:"key" binding:"required"`
+}
+
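+// AddMyPublicKey registers an SSH public key for the current user; guests are
+// rejected and the key must parse as a valid authorized_keys entry.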
"github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/server/common" + "github.com/gin-gonic/gin" + "strconv" + "strings" +) + +type SSHKeyAddReq struct { + Title string `json:"title" binding:"required"` + Key string `json:"key" binding:"required"` +} + +func AddMyPublicKey(c *gin.Context) { + userObj, ok := c.Value("user").(*model.User) + if !ok || userObj.IsGuest() { + common.ErrorStrResp(c, "user invalid", 401) + return + } + var req SSHKeyAddReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorStrResp(c, "request invalid", 400) + return + } + if req.Title == "" { + common.ErrorStrResp(c, "request invalid", 400) + return + } + key := &model.SSHPublicKey{ + Title: req.Title, + KeyStr: strings.TrimSpace(req.Key), + UserId: userObj.ID, + } + err, parsed := op.CreateSSHPublicKey(key) + if !parsed { + common.ErrorStrResp(c, "provided key invalid", 400) + return + } else if err != nil { + common.ErrorResp(c, err, 500, true) + return + } + common.SuccessResp(c) +} + +func ListMyPublicKey(c *gin.Context) { + userObj, ok := c.Value("user").(*model.User) + if !ok || userObj.IsGuest() { + common.ErrorStrResp(c, "user invalid", 401) + return + } + list(c, userObj) +} + +func DeleteMyPublicKey(c *gin.Context) { + userObj, ok := c.Value("user").(*model.User) + if !ok || userObj.IsGuest() { + common.ErrorStrResp(c, "user invalid", 401) + return + } + keyId, err := strconv.Atoi(c.Query("id")) + if err != nil { + common.ErrorStrResp(c, "id format invalid", 400) + return + } + key, err := op.GetSSHPublicKeyByIdAndUserId(uint(keyId), userObj.ID) + if err != nil { + common.ErrorStrResp(c, "failed to get public key", 404) + return + } + err = op.DeleteSSHPublicKeyById(key.ID) + if err != nil { + common.ErrorResp(c, err, 500, true) + return + } + common.SuccessResp(c) +} + +func ListPublicKeys(c *gin.Context) { + userId, err := strconv.Atoi(c.Query("uid")) + if err != nil { + common.ErrorStrResp(c, "user id format invalid", 400) + return + } + userObj, err := op.GetUserById(uint(userId)) + if err != nil { + common.ErrorStrResp(c, "user invalid", 404) + return + } + list(c, userObj) +} + +func DeletePublicKey(c *gin.Context) { + keyId, err := strconv.Atoi(c.Query("id")) + if err != nil { + common.ErrorStrResp(c, "id format invalid", 400) + return + } + err = op.DeleteSSHPublicKeyById(uint(keyId)) + if err != nil { + common.ErrorResp(c, err, 500, true) + return + } + common.SuccessResp(c) +} + +func list(c *gin.Context, userObj *model.User) { + var req model.PageReq + if err := c.ShouldBind(&req); err != nil { + common.ErrorResp(c, err, 400) + return + } + req.Validate() + keys, total, err := op.GetSSHPublicKeyByUserId(userObj.ID, req.Page, req.PerPage) + if err != nil { + common.ErrorResp(c, err, 500, true) + return + } + common.SuccessResp(c, common.PageResp{ + Content: keys, + Total: total, + }) +} diff --git a/server/handles/ssologin.go b/server/handles/ssologin.go index 70298a9c..62bd4aaa 100644 --- a/server/handles/ssologin.go +++ b/server/handles/ssologin.go @@ -1,7 +1,6 @@ package handles import ( - "encoding/base32" "encoding/base64" "errors" "fmt" @@ -11,6 +10,8 @@ import ( "strings" "time" + "github.com/Xhofe/go-cache" + "github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/db" "github.com/alist-org/alist/v3/internal/model" @@ -21,29 +22,45 @@ import ( "github.com/coreos/go-oidc" "github.com/gin-gonic/gin" "github.com/go-resty/resty/v2" - "github.com/pquerna/otp" - 
"github.com/pquerna/otp/totp" "golang.org/x/oauth2" "gorm.io/gorm" ) -var opts = totp.ValidateOpts{ - // state verify won't expire in 30 secs, which is quite enough for the callback - Period: 30, - Skew: 1, - // in some OIDC providers(such as Authelia), state parameter must be at least 8 characters - Digits: otp.DigitsEight, - Algorithm: otp.AlgorithmSHA1, +const stateLength = 16 +const stateExpire = time.Minute * 5 + +var stateCache = cache.NewMemCache[string](cache.WithShards[string](stateLength)) + +func _keyState(clientID, state string) string { + return fmt.Sprintf("%s_%s", clientID, state) +} + +func generateState(clientID, ip string) string { + state := random.String(stateLength) + stateCache.Set(_keyState(clientID, state), ip, cache.WithEx[string](stateExpire)) + return state +} + +func verifyState(clientID, ip, state string) bool { + value, ok := stateCache.Get(_keyState(clientID, state)) + return ok && value == ip +} + +func ssoRedirectUri(c *gin.Context, useCompatibility bool, method string) string { + if useCompatibility { + return common.GetApiUrl(c.Request) + "/api/auth/" + method + } else { + return common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + method + } } func SSOLoginRedirect(c *gin.Context) { method := c.Query("method") - usecompatibility := setting.GetBool(conf.SSOCompatibilityMode) + useCompatibility := setting.GetBool(conf.SSOCompatibilityMode) enabled := setting.GetBool(conf.SSOLoginEnabled) clientId := setting.GetStr(conf.SSOClientId) platform := setting.GetStr(conf.SSOLoginPlatform) - var r_url string - var redirect_uri string + var rUrl string if !enabled { common.ErrorStrResp(c, "Single sign-on is not enabled", 403) return @@ -53,69 +70,52 @@ func SSOLoginRedirect(c *gin.Context) { common.ErrorStrResp(c, "no method provided", 400) return } - if usecompatibility { - redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/" + method - } else { - redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + method - } + redirectUri := ssoRedirectUri(c, useCompatibility, method) urlValues.Add("response_type", "code") - urlValues.Add("redirect_uri", redirect_uri) + urlValues.Add("redirect_uri", redirectUri) urlValues.Add("client_id", clientId) switch platform { case "Github": - r_url = "https://github.com/login/oauth/authorize?" + rUrl = "https://github.com/login/oauth/authorize?" urlValues.Add("scope", "read:user") case "Microsoft": - r_url = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize?" + rUrl = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize?" urlValues.Add("scope", "user.read") urlValues.Add("response_mode", "query") case "Google": - r_url = "https://accounts.google.com/o/oauth2/v2/auth?" + rUrl = "https://accounts.google.com/o/oauth2/v2/auth?" urlValues.Add("scope", "https://www.googleapis.com/auth/userinfo.profile") case "Dingtalk": - r_url = "https://login.dingtalk.com/oauth2/auth?" + rUrl = "https://login.dingtalk.com/oauth2/auth?" urlValues.Add("scope", "openid") urlValues.Add("prompt", "consent") urlValues.Add("response_type", "code") case "Casdoor": endpoint := strings.TrimSuffix(setting.GetStr(conf.SSOEndpointName), "/") - r_url = endpoint + "/login/oauth/authorize?" + rUrl = endpoint + "/login/oauth/authorize?" 
urlValues.Add("scope", "profile") urlValues.Add("state", endpoint) case "OIDC": - oauth2Config, err := GetOIDCClient(c) - if err != nil { - common.ErrorStrResp(c, err.Error(), 400) - return - } - // generate state parameter - state, err := totp.GenerateCodeCustom(base32.StdEncoding.EncodeToString([]byte(oauth2Config.ClientSecret)), time.Now(), opts) + oauth2Config, err := GetOIDCClient(c, useCompatibility, redirectUri, method) if err != nil { common.ErrorStrResp(c, err.Error(), 400) return } + state := generateState(clientId, c.ClientIP()) c.Redirect(http.StatusFound, oauth2Config.AuthCodeURL(state)) return default: common.ErrorStrResp(c, "invalid platform", 400) return } - c.Redirect(302, r_url+urlValues.Encode()) + c.Redirect(302, rUrl+urlValues.Encode()) } var ssoClient = resty.New().SetRetryCount(3) -func GetOIDCClient(c *gin.Context) (*oauth2.Config, error) { - var redirect_uri string - usecompatibility := setting.GetBool(conf.SSOCompatibilityMode) - argument := c.Query("method") - if usecompatibility { - argument = path.Base(c.Request.URL.Path) - } - if usecompatibility { - redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/" + argument - } else { - redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + argument +func GetOIDCClient(c *gin.Context, useCompatibility bool, redirectUri, method string) (*oauth2.Config, error) { + if redirectUri == "" { + redirectUri = ssoRedirectUri(c, useCompatibility, method) } endpoint := setting.GetStr(conf.SSOEndpointName) provider, err := oidc.NewProvider(c, endpoint) @@ -124,16 +124,20 @@ func GetOIDCClient(c *gin.Context) (*oauth2.Config, error) { } clientId := setting.GetStr(conf.SSOClientId) clientSecret := setting.GetStr(conf.SSOClientSecret) + extraScopes := []string{} + if setting.GetStr(conf.SSOExtraScopes) != "" { + extraScopes = strings.Split(setting.GetStr(conf.SSOExtraScopes), " ") + } return &oauth2.Config{ ClientID: clientId, ClientSecret: clientSecret, - RedirectURL: redirect_uri, + RedirectURL: redirectUri, // Discovery returns the OAuth2 endpoints. Endpoint: provider.Endpoint(), // "openid" is a required scope for OpenID Connect flows. 
+	extraScopes := []string{}
+	if setting.GetStr(conf.SSOExtraScopes) != "" {
+		extraScopes = strings.Split(setting.GetStr(conf.SSOExtraScopes), " ")
+	}
 	return &oauth2.Config{
 		ClientID:     clientId,
 		ClientSecret: clientSecret,
-		RedirectURL:  redirect_uri,
+		RedirectURL:  redirectUri,
 		// Discovery returns the OAuth2 endpoints.
 		Endpoint: provider.Endpoint(),
 		// "openid" is a required scope for OpenID Connect flows.
-		Scopes: []string{oidc.ScopeOpenID, "profile"},
+		Scopes: append([]string{oidc.ScopeOpenID, "profile"}, extraScopes...),
 	}, nil
 }
 
@@ -181,9 +185,9 @@ func parseJWT(p string) ([]byte, error) {
 
 func OIDCLoginCallback(c *gin.Context) {
 	useCompatibility := setting.GetBool(conf.SSOCompatibilityMode)
-	argument := c.Query("method")
+	method := c.Query("method")
 	if useCompatibility {
-		argument = path.Base(c.Request.URL.Path)
+		method = path.Base(c.Request.URL.Path)
 	}
 	clientId := setting.GetStr(conf.SSOClientId)
 	endpoint := setting.GetStr(conf.SSOEndpointName)
@@ -192,18 +196,12 @@ func OIDCLoginCallback(c *gin.Context) {
 		common.ErrorResp(c, err, 400)
 		return
 	}
-	oauth2Config, err := GetOIDCClient(c)
+	oauth2Config, err := GetOIDCClient(c, useCompatibility, "", method)
 	if err != nil {
 		common.ErrorResp(c, err, 400)
 		return
 	}
-	// add state verify process
-	stateVerification, err := totp.ValidateCustom(c.Query("state"), base32.StdEncoding.EncodeToString([]byte(oauth2Config.ClientSecret)), time.Now(), opts)
-	if err != nil {
-		common.ErrorResp(c, err, 400)
-		return
-	}
-	if !stateVerification {
+	if !verifyState(clientId, c.ClientIP(), c.Query("state")) {
 		common.ErrorStrResp(c, "incorrect or expired state parameter", 400)
 		return
 	}
@@ -236,7 +234,7 @@ func OIDCLoginCallback(c *gin.Context) {
 		common.ErrorStrResp(c, "cannot get username from OIDC provider", 400)
 		return
 	}
-	if argument == "get_sso_id" {
+	if method == "get_sso_id" {
 		if useCompatibility {
 			c.Redirect(302, common.GetApiUrl(c.Request)+"/@manage?sso_id="+userID)
 			return
@@ -252,7 +250,7 @@ func OIDCLoginCallback(c *gin.Context) {
 		c.Data(200, "text/html; charset=utf-8", []byte(html))
 		return
 	}
-	if argument == "sso_get_token" {
+	if method == "sso_get_token" {
 		user, err := db.GetUserBySSOID(userID)
 		if err != nil {
 			user, err = autoRegister(userID, userID, err)
diff --git a/server/handles/task.go b/server/handles/task.go
index a8b4d21b..af7974a9 100644
--- a/server/handles/task.go
+++ b/server/handles/task.go
@@ -1,7 +1,10 @@
 package handles
 
 import (
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/task"
 	"math"
+	"time"
 
 	"github.com/alist-org/alist/v3/internal/fs"
 	"github.com/alist-org/alist/v3/internal/offline_download/tool"
@@ -12,15 +15,20 @@ import (
 )
 
 type TaskInfo struct {
-	ID       string      `json:"id"`
-	Name     string      `json:"name"`
-	State    tache.State `json:"state"`
-	Status   string      `json:"status"`
-	Progress float64     `json:"progress"`
-	Error    string      `json:"error"`
+	ID          string      `json:"id"`
+	Name        string      `json:"name"`
+	Creator     string      `json:"creator"`
+	CreatorRole int         `json:"creator_role"`
+	State       tache.State `json:"state"`
+	Status      string      `json:"status"`
+	Progress    float64     `json:"progress"`
+	StartTime   *time.Time  `json:"start_time"`
+	EndTime     *time.Time  `json:"end_time"`
+	TotalBytes  int64       `json:"total_bytes"`
+	Error       string      `json:"error"`
 }
 
-func getTaskInfo[T tache.TaskWithInfo](task T) TaskInfo {
+func getTaskInfo[T task.TaskExtensionInfo](task T) TaskInfo {
 	errMsg := ""
 	if task.GetErr() != nil {
 		errMsg = task.GetErr().Error()
 	}
@@ -30,62 +38,179 @@
 	if math.IsNaN(progress) {
 		progress = 100
 	}
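+	// a task may have no recorded creator; CreatorRole -1 marks that case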
+	creatorName := ""
+	creatorRole := -1
+	if task.GetCreator() != nil {
+		creatorName = task.GetCreator().Username
+		creatorRole = task.GetCreator().Role
+	}
 	return TaskInfo{
-		ID:       task.GetID(),
-		Name:     task.GetName(),
-		State:    task.GetState(),
-		Status:   task.GetStatus(),
-		Progress: progress,
-		Error:    errMsg,
+		ID:          task.GetID(),
+		Name:        task.GetName(),
+		Creator:     creatorName,
+		CreatorRole: creatorRole,
+		State:       task.GetState(),
+		Status:      task.GetStatus(),
+		Progress:    progress,
+		StartTime:   task.GetStartTime(),
+		EndTime:     task.GetEndTime(),
+		TotalBytes:  task.GetTotalBytes(),
+		Error:       errMsg,
 	}
 }
 
-func getTaskInfos[T tache.TaskWithInfo](tasks []T) []TaskInfo {
+func getTaskInfos[T task.TaskExtensionInfo](tasks []T) []TaskInfo {
 	return utils.MustSliceConvert(tasks, getTaskInfo[T])
 }
 
-func taskRoute[T tache.TaskWithInfo](g *gin.RouterGroup, manager *tache.Manager[T]) {
-	g.GET("/undone", func(c *gin.Context) {
-		common.SuccessResp(c, getTaskInfos(manager.GetByState(tache.StatePending, tache.StateRunning,
-			tache.StateCanceling, tache.StateErrored, tache.StateFailing, tache.StateWaitingRetry, tache.StateBeforeRetry)))
-	})
-	g.GET("/done", func(c *gin.Context) {
-		common.SuccessResp(c, getTaskInfos(manager.GetByState(tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)))
-	})
-	g.POST("/info", func(c *gin.Context) {
-		tid := c.Query("tid")
-		task, ok := manager.GetByID(tid)
+func argsContains[T comparable](v T, slice ...T) bool {
+	return utils.SliceContains(slice, v)
+}
+
+func getUserInfo(c *gin.Context) (bool, uint, bool) {
+	if user, ok := c.Value("user").(*model.User); ok {
+		return user.IsAdmin(), user.ID, true
+	} else {
+		return false, 0, false
+	}
+}
+
+func getTargetedHandler[T task.TaskExtensionInfo](manager task.Manager[T], callback func(c *gin.Context, task T)) gin.HandlerFunc {
+	return func(c *gin.Context) {
+		isAdmin, uid, ok := getUserInfo(c)
+		if !ok {
+			// unreachable unless there is a bug elsewhere
+			common.ErrorStrResp(c, "user invalid", 401)
+			return
+		}
+		t, ok := manager.GetByID(c.Query("tid"))
 		if !ok {
 			common.ErrorStrResp(c, "task not found", 404)
 			return
 		}
+		if !isAdmin && uid != t.GetCreator().ID {
+			// to avoid an attacker using error messages to guess valid TIDs, return a 404 rather than a 403
+			common.ErrorStrResp(c, "task not found", 404)
+			return
+		}
+		callback(c, t)
+	}
+}
+
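+// getBatchHandler applies callback to every task in the posted TID list that
+// the caller owns (admins match everything); failures are reported per TID
+// instead of aborting the whole batch.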
+func getBatchHandler[T task.TaskExtensionInfo](manager task.Manager[T], callback func(task T)) gin.HandlerFunc {
+	return func(c *gin.Context) {
+		isAdmin, uid, ok := getUserInfo(c)
+		if !ok {
+			common.ErrorStrResp(c, "user invalid", 401)
+			return
+		}
+		var tids []string
+		if err := c.ShouldBind(&tids); err != nil {
+			common.ErrorStrResp(c, "invalid request format", 400)
+			return
+		}
+		retErrs := make(map[string]string)
+		for _, tid := range tids {
+			t, ok := manager.GetByID(tid)
+			if !ok || (!isAdmin && uid != t.GetCreator().ID) {
+				retErrs[tid] = "task not found"
+				continue
+			}
+			callback(t)
+		}
+		common.SuccessResp(c, retErrs)
+	}
+}
+
+func taskRoute[T task.TaskExtensionInfo](g *gin.RouterGroup, manager task.Manager[T]) {
+	g.GET("/undone", func(c *gin.Context) {
+		isAdmin, uid, ok := getUserInfo(c)
+		if !ok {
+			// unreachable unless there is a bug elsewhere
+			common.ErrorStrResp(c, "user invalid", 401)
+			return
+		}
+		common.SuccessResp(c, getTaskInfos(manager.GetByCondition(func(task T) bool {
+			// avoid directly passing the user object into the function to reduce closure size
+			return (isAdmin || uid == task.GetCreator().ID) &&
+				argsContains(task.GetState(), tache.StatePending, tache.StateRunning, tache.StateCanceling,
+					tache.StateErrored, tache.StateFailing, tache.StateWaitingRetry, tache.StateBeforeRetry)
+		})))
+	})
+	g.GET("/done", func(c *gin.Context) {
+		isAdmin, uid, ok := getUserInfo(c)
+		if !ok {
+			// unreachable unless there is a bug elsewhere
+			common.ErrorStrResp(c, "user invalid", 401)
+			return
+		}
+		common.SuccessResp(c, getTaskInfos(manager.GetByCondition(func(task T) bool {
+			return (isAdmin || uid == task.GetCreator().ID) &&
+				argsContains(task.GetState(), tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
+		})))
+	})
+	g.POST("/info", getTargetedHandler(manager, func(c *gin.Context, task T) {
 		common.SuccessResp(c, getTaskInfo(task))
-	})
-	g.POST("/cancel", func(c *gin.Context) {
-		tid := c.Query("tid")
-		manager.Cancel(tid)
+	}))
+	g.POST("/cancel", getTargetedHandler(manager, func(c *gin.Context, task T) {
+		manager.Cancel(task.GetID())
 		common.SuccessResp(c)
-	})
-	g.POST("/delete", func(c *gin.Context) {
-		tid := c.Query("tid")
-		manager.Remove(tid)
+	}))
+	g.POST("/delete", getTargetedHandler(manager, func(c *gin.Context, task T) {
+		manager.Remove(task.GetID())
 		common.SuccessResp(c)
-	})
-	g.POST("/retry", func(c *gin.Context) {
-		tid := c.Query("tid")
-		manager.Retry(tid)
+	}))
+	g.POST("/retry", getTargetedHandler(manager, func(c *gin.Context, task T) {
+		manager.Retry(task.GetID())
 		common.SuccessResp(c)
-	})
+	}))
+	g.POST("/cancel_some", getBatchHandler(manager, func(task T) {
+		manager.Cancel(task.GetID())
+	}))
+	g.POST("/delete_some", getBatchHandler(manager, func(task T) {
+		manager.Remove(task.GetID())
+	}))
+	g.POST("/retry_some", getBatchHandler(manager, func(task T) {
+		manager.Retry(task.GetID())
+	}))
 	g.POST("/clear_done", func(c *gin.Context) {
-		manager.RemoveByState(tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
+		isAdmin, uid, ok := getUserInfo(c)
+		if !ok {
+			// unreachable unless there is a bug elsewhere
+			common.ErrorStrResp(c, "user invalid", 401)
+			return
+		}
+		manager.RemoveByCondition(func(task T) bool {
+			return (isAdmin || uid == task.GetCreator().ID) &&
+				argsContains(task.GetState(), tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
+		})
 		common.SuccessResp(c)
 	})
 	g.POST("/clear_succeeded", func(c *gin.Context) {
-		manager.RemoveByState(tache.StateSucceeded)
+		isAdmin, uid, ok := getUserInfo(c)
+		if !ok {
+			// unreachable unless there is a bug elsewhere
+			common.ErrorStrResp(c, "user invalid", 401)
+			return
+		}
+		manager.RemoveByCondition(func(task T) bool {
+			return (isAdmin || uid == task.GetCreator().ID) && task.GetState() == tache.StateSucceeded
+		})
 		common.SuccessResp(c)
 	})
 	g.POST("/retry_failed", func(c *gin.Context) {
-		manager.RetryAllFailed()
+		isAdmin, uid, ok := getUserInfo(c)
+		if !ok {
+			// unreachable unless there is a bug elsewhere
+			common.ErrorStrResp(c, "user invalid", 401)
+			return
+		}
+		tasks := manager.GetByCondition(func(task T) bool {
+			return (isAdmin || uid == task.GetCreator().ID) && task.GetState() == tache.StateFailed
+		})
+		for _, t := range tasks {
+			manager.Retry(t.GetID())
+		}
 		common.SuccessResp(c)
 	})
 }
@@ -95,4 +220,6 @@ func SetupTaskRoute(g *gin.RouterGroup) {
 	taskRoute(g.Group("/copy"), fs.CopyTaskManager)
 	taskRoute(g.Group("/offline_download"), tool.DownloadTaskManager)
 	taskRoute(g.Group("/offline_download_transfer"), tool.TransferTaskManager)
+	taskRoute(g.Group("/decompress"), fs.ArchiveDownloadTaskManager)
+	taskRoute(g.Group("/decompress_upload"), fs.ArchiveContentUploadTaskManager)
 }
diff --git a/server/handles/webauthn.go b/server/handles/webauthn.go
index 1bd1884e..c6a7650c 100644
--- a/server/handles/webauthn.go
+++ b/server/handles/webauthn.go
@@ -207,6 +207,10 @@ func DeleteAuthnLogin(c *gin.Context) {
 		return
 	}
 	err = db.RemoveAuthn(user, req.ID)
+	if err != nil {
+		common.ErrorResp(c, err, 400)
+		return
+	}
 	err = op.DelUserCache(user.Username)
 	if err != nil {
 		common.ErrorResp(c, err, 400)
diff --git a/server/middlewares/auth.go b/server/middlewares/auth.go
index 14f186be..d65d1ad6 100644
--- a/server/middlewares/auth.go
+++ b/server/middlewares/auth.go
@@ -127,6 +127,16 @@ func Authn(c *gin.Context) {
 	c.Next()
 }
 
+func AuthNotGuest(c *gin.Context) {
+	user := c.MustGet("user").(*model.User)
+	if user.IsGuest() {
+		common.ErrorStrResp(c, "You are a guest", 403)
+		c.Abort()
+	} else {
+		c.Next()
+	}
+}
+
 func AuthAdmin(c *gin.Context) {
 	user := c.MustGet("user").(*model.User)
 	if !user.IsAdmin() {
diff --git a/server/middlewares/down.go b/server/middlewares/down.go
index 05e9dc85..d015672d 100644
--- a/server/middlewares/down.go
+++ b/server/middlewares/down.go
@@ -9,35 +9,36 @@ import (
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
-	"github.com/alist-org/alist/v3/internal/sign"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/server/common"
 	"github.com/gin-gonic/gin"
 	"github.com/pkg/errors"
 )
 
-func Down(c *gin.Context) {
-	rawPath := parsePath(c.Param("path"))
-	c.Set("path", rawPath)
-	meta, err := op.GetNearestMeta(rawPath)
-	if err != nil {
-		if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
-			common.ErrorResp(c, err, 500, true)
-			return
-		}
-	}
-	c.Set("meta", meta)
-	// verify sign
-	if needSign(meta, rawPath) {
-		s := c.Query("sign")
-		err = sign.Verify(rawPath, strings.TrimSuffix(s, "/"))
+func Down(verifyFunc func(string, string) error) func(c *gin.Context) {
+	return func(c *gin.Context) {
+		rawPath := parsePath(c.Param("path"))
+		c.Set("path", rawPath)
+		meta, err := op.GetNearestMeta(rawPath)
 		if err != nil {
-			common.ErrorResp(c, err, 401)
-			c.Abort()
-			return
+			if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
+				common.ErrorResp(c, err, 500, true)
+				return
+			}
 		}
+		c.Set("meta", meta)
+		// verify sign
+		if needSign(meta, rawPath) {
+			s := c.Query("sign")
+			err = verifyFunc(rawPath, strings.TrimSuffix(s, "/"))
+			if err != nil {
+				common.ErrorResp(c, err, 401)
+				c.Abort()
+				return
+			}
+		}
+		c.Next()
 	}
-	c.Next()
 }
 
 // TODO: implement
diff --git a/server/middlewares/limit.go b/server/middlewares/limit.go
index 44c079b3..2ccee950 100644
--- a/server/middlewares/limit.go
+++ b/server/middlewares/limit.go
@@ -1,7 +1,9 @@
 package middlewares
 
 import (
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/gin-gonic/gin"
+	"io"
 )
 
 func MaxAllowed(n int) gin.HandlerFunc {
@@ -14,3 +16,37 @@ func MaxAllowed(n int) gin.HandlerFunc {
 		c.Next()
 	}
 }
+
+func UploadRateLimiter(limiter stream.Limiter) gin.HandlerFunc {
+	return func(c *gin.Context) {
+		c.Request.Body = &stream.RateLimitReader{
+			Reader:  c.Request.Body,
+			Limiter: limiter,
+			Ctx:     c,
+		}
+		c.Next()
+	}
+}
+
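+// ResponseWriterWrapper lets the download limiter stand in for gin's writer
+// while still satisfying the gin.ResponseWriter interface.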
"github.com/alist-org/alist/v3/internal/stream" "github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/server/common" "github.com/alist-org/alist/v3/server/handles" @@ -38,10 +40,19 @@ func Init(e *gin.Engine) { WebDav(g.Group("/dav")) S3(g.Group("/s3")) - g.GET("/d/*path", middlewares.Down, handles.Down) - g.GET("/p/*path", middlewares.Down, handles.Proxy) - g.HEAD("/d/*path", middlewares.Down, handles.Down) - g.HEAD("/p/*path", middlewares.Down, handles.Proxy) + downloadLimiter := middlewares.DownloadRateLimiter(stream.ClientDownloadLimit) + signCheck := middlewares.Down(sign.Verify) + g.GET("/d/*path", signCheck, downloadLimiter, handles.Down) + g.GET("/p/*path", signCheck, downloadLimiter, handles.Proxy) + g.HEAD("/d/*path", signCheck, handles.Down) + g.HEAD("/p/*path", signCheck, handles.Proxy) + archiveSignCheck := middlewares.Down(sign.VerifyArchive) + g.GET("/ad/*path", archiveSignCheck, downloadLimiter, handles.ArchiveDown) + g.GET("/ap/*path", archiveSignCheck, downloadLimiter, handles.ArchiveProxy) + g.GET("/ae/*path", archiveSignCheck, downloadLimiter, handles.ArchiveInternalExtract) + g.HEAD("/ad/*path", archiveSignCheck, handles.ArchiveDown) + g.HEAD("/ap/*path", archiveSignCheck, handles.ArchiveProxy) + g.HEAD("/ae/*path", archiveSignCheck, handles.ArchiveInternalExtract) api := g.Group("/api") auth := api.Group("", middlewares.Auth) @@ -52,6 +63,9 @@ func Init(e *gin.Engine) { api.POST("/auth/login/ldap", handles.LoginLdap) auth.GET("/me", handles.CurrentUser) auth.POST("/me/update", handles.UpdateCurrent) + auth.GET("/me/sshkey/list", handles.ListMyPublicKey) + auth.POST("/me/sshkey/add", handles.AddMyPublicKey) + auth.POST("/me/sshkey/delete", handles.DeleteMyPublicKey) auth.POST("/auth/2fa/generate", handles.Generate2FA) auth.POST("/auth/2fa/verify", handles.Verify2FA) auth.GET("/auth/logout", handles.LogOut) @@ -62,11 +76,11 @@ func Init(e *gin.Engine) { api.GET("/auth/get_sso_id", handles.SSOLoginCallback) api.GET("/auth/sso_get_token", handles.SSOLoginCallback) - //webauthn + // webauthn + api.GET("/authn/webauthn_begin_login", handles.BeginAuthnLogin) + api.POST("/authn/webauthn_finish_login", handles.FinishAuthnLogin) webauthn.GET("/webauthn_begin_registration", handles.BeginAuthnRegistration) webauthn.POST("/webauthn_finish_registration", handles.FinishAuthnRegistration) - webauthn.GET("/webauthn_begin_login", handles.BeginAuthnLogin) - webauthn.POST("/webauthn_finish_login", handles.FinishAuthnLogin) webauthn.POST("/delete_authn", handles.DeleteAuthnLogin) webauthn.GET("/getcredentials", handles.GetAuthnCredentials) @@ -74,8 +88,10 @@ func Init(e *gin.Engine) { public := api.Group("/public") public.Any("/settings", handles.PublicSettings) public.Any("/offline_download_tools", handles.OfflineDownloadTools) + public.Any("/archive_extensions", handles.ArchiveExtensions) _fs(auth.Group("/fs")) + _task(auth.Group("/task", middlewares.AuthNotGuest)) admin(auth.Group("/admin", middlewares.AuthAdmin)) if flags.Debug || flags.Dev { debug(g.Group("/debug")) @@ -101,6 +117,8 @@ func admin(g *gin.RouterGroup) { user.POST("/cancel_2fa", handles.Cancel2FAById) user.POST("/delete", handles.DeleteUser) user.POST("/del_cache", handles.DelUserCache) + user.GET("/sshkey/list", handles.ListPublicKeys) + user.POST("/sshkey/delete", handles.DeletePublicKey) storage := g.Group("/storage") storage.GET("/list", handles.ListStorages) @@ -125,9 +143,13 @@ func admin(g *gin.RouterGroup) { setting.POST("/reset_token", handles.ResetToken) setting.POST("/set_aria2", 
+	setting.POST("/set_transmission", handles.SetTransmission)
+	setting.POST("/set_115", handles.Set115)
+	setting.POST("/set_pikpak", handles.SetPikPak)
+	setting.POST("/set_thunder", handles.SetThunder)
 
-	task := g.Group("/task")
-	handles.SetupTaskRoute(task)
+	// retain /admin/task API to ensure compatibility with legacy automation scripts
+	_task(g.Group("/task"))
 
 	ms := g.Group("/message")
 	ms.POST("/get", message.HttpInstance.GetHandle)
@@ -156,17 +178,27 @@ func _fs(g *gin.RouterGroup) {
 	g.POST("/copy", handles.FsCopy)
 	g.POST("/remove", handles.FsRemove)
 	g.POST("/remove_empty_directory", handles.FsRemoveEmptyDirectory)
-	g.PUT("/put", middlewares.FsUp, handles.FsStream)
-	g.PUT("/form", middlewares.FsUp, handles.FsForm)
+	uploadLimiter := middlewares.UploadRateLimiter(stream.ClientUploadLimit)
+	g.PUT("/put", middlewares.FsUp, uploadLimiter, handles.FsStream)
+	g.PUT("/form", middlewares.FsUp, uploadLimiter, handles.FsForm)
 	g.POST("/link", middlewares.AuthAdmin, handles.Link)
-	//g.POST("/add_aria2", handles.AddOfflineDownload)
-	//g.POST("/add_qbit", handles.AddQbittorrent)
+	// g.POST("/add_aria2", handles.AddOfflineDownload)
+	// g.POST("/add_qbit", handles.AddQbittorrent)
+	// g.POST("/add_transmission", handles.SetTransmission)
 	g.POST("/add_offline_download", handles.AddOfflineDownload)
+	a := g.Group("/archive")
+	a.Any("/meta", handles.FsArchiveMeta)
+	a.Any("/list", handles.FsArchiveList)
+	a.POST("/decompress", handles.FsArchiveDecompress)
+}
+
+func _task(g *gin.RouterGroup) {
+	handles.SetupTaskRoute(g)
 }
 
 func Cors(r *gin.Engine) {
 	config := cors.DefaultConfig()
-	//config.AllowAllOrigins = true
+	// config.AllowAllOrigins = true
 	config.AllowOrigins = conf.Conf.Cors.AllowOrigins
 	config.AllowHeaders = conf.Conf.Cors.AllowHeaders
 	config.AllowMethods = conf.Conf.Cors.AllowMethods
diff --git a/server/s3/backend.go b/server/s3/backend.go
index e0cfd967..a1e99044 100644
--- a/server/s3/backend.go
+++ b/server/s3/backend.go
@@ -6,13 +6,14 @@ import (
 	"context"
 	"encoding/hex"
 	"fmt"
-	"github.com/pkg/errors"
 	"io"
 	"path"
 	"strings"
 	"sync"
 	"time"
 
+	"github.com/pkg/errors"
+
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/fs"
 	"github.com/alist-org/alist/v3/internal/model"
@@ -173,20 +174,28 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
 	if link.RangeReadCloser == nil && link.MFile == nil && len(link.URL) == 0 {
 		return nil, fmt.Errorf("the remote storage driver need to be enhanced to support s3")
 	}
-	remoteFileSize := file.GetSize()
-	remoteClosers := utils.EmptyClosers()
-	rangeReaderFunc := func(ctx context.Context, start, length int64) (io.ReadCloser, error) {
+
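+	// resolve the requested range up front; length -1 means "to end of object"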
context.Context, bucketName, objectName string } if rrc != nil { remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: start, Length: length}) - remoteClosers.AddClosers(rrc.GetClosers()) if err != nil { return nil, err } - return remoteReader, nil - } - if link.MFile != nil { - _, err := link.MFile.Seek(start, io.SeekStart) - if err != nil { - return nil, err - } - //remoteClosers.Add(remoteLink.MFile) - //keep reuse same MFile and close at last. - remoteClosers.Add(link.MFile) - return io.NopCloser(link.MFile), nil - } - return nil, errs.NotSupport - } - - var rdr io.ReadCloser - if rnge != nil { - rdr, err = rangeReaderFunc(ctx, rnge.Start, rnge.Length) - if err != nil { - return nil, err - } - } else { - rdr, err = rangeReaderFunc(ctx, 0, -1) - if err != nil { - return nil, err + rdr = utils.ReadCloser{Reader: remoteReader, Closer: rrc} + } else { + return nil, errs.NotSupport } } diff --git a/server/sftp.go b/server/sftp.go new file mode 100644 index 00000000..42c676e8 --- /dev/null +++ b/server/sftp.go @@ -0,0 +1,140 @@ +package server + +import ( + "context" + "github.com/KirCute/sftpd-alist" + "github.com/alist-org/alist/v3/internal/conf" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/internal/op" + "github.com/alist-org/alist/v3/internal/setting" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/server/ftp" + "github.com/alist-org/alist/v3/server/sftp" + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" + "net/http" + "time" +) + +type SftpDriver struct { + proxyHeader *http.Header + config *sftpd.Config +} + +func NewSftpDriver() (*SftpDriver, error) { + sftp.InitHostKey() + header := &http.Header{} + header.Add("User-Agent", setting.GetStr(conf.FTPProxyUserAgent)) + return &SftpDriver{ + proxyHeader: header, + }, nil +} + +func (d *SftpDriver) GetConfig() *sftpd.Config { + if d.config != nil { + return d.config + } + serverConfig := ssh.ServerConfig{ + NoClientAuth: true, + NoClientAuthCallback: d.NoClientAuth, + PasswordCallback: d.PasswordAuth, + PublicKeyCallback: d.PublicKeyAuth, + AuthLogCallback: d.AuthLogCallback, + BannerCallback: d.GetBanner, + } + for _, k := range sftp.SSHSigners { + serverConfig.AddHostKey(k) + } + d.config = &sftpd.Config{ + ServerConfig: serverConfig, + HostPort: conf.Conf.SFTP.Listen, + ErrorLogFunc: utils.Log.Error, + //DebugLogFunc: utils.Log.Debugf, + } + return d.config +} + +func (d *SftpDriver) GetFileSystem(sc *ssh.ServerConn) (sftpd.FileSystem, error) { + userObj, err := op.GetUserByName(sc.User()) + if err != nil { + return nil, err + } + ctx := context.Background() + ctx = context.WithValue(ctx, "user", userObj) + ctx = context.WithValue(ctx, "meta_pass", "") + ctx = context.WithValue(ctx, "client_ip", sc.RemoteAddr().String()) + ctx = context.WithValue(ctx, "proxy_header", d.proxyHeader) + return &sftp.DriverAdapter{FtpDriver: ftp.NewAferoAdapter(ctx)}, nil +} + +func (d *SftpDriver) Close() { +} + +func (d *SftpDriver) NoClientAuth(conn ssh.ConnMetadata) (*ssh.Permissions, error) { + if conn.User() != "guest" { + return nil, errors.New("only guest is allowed to login without authorization") + } + guest, err := op.GetGuest() + if err != nil { + return nil, err + } + if guest.Disabled || !guest.CanFTPAccess() { + return nil, errors.New("user is not allowed to access via SFTP") + } + return nil, nil +} + +func (d *SftpDriver) PasswordAuth(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) { + userObj, err := op.GetUserByName(conn.User()) + if err != 
nil { + return nil, err + } + if userObj.Disabled || !userObj.CanFTPAccess() { + return nil, errors.New("user is not allowed to access via SFTP") + } + passHash := model.StaticHash(string(password)) + if err = userObj.ValidatePwdStaticHash(passHash); err != nil { + return nil, err + } + return nil, nil +} + +func (d *SftpDriver) PublicKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) { + userObj, err := op.GetUserByName(conn.User()) + if err != nil { + return nil, err + } + if userObj.Disabled || !userObj.CanFTPAccess() { + return nil, errors.New("user is not allowed to access via SFTP") + } + keys, _, err := op.GetSSHPublicKeyByUserId(userObj.ID, 1, -1) + if err != nil { + return nil, err + } + marshal := string(key.Marshal()) + for _, sk := range keys { + if marshal != sk.KeyStr { + pubKey, _, _, _, e := ssh.ParseAuthorizedKey([]byte(sk.KeyStr)) + if e != nil || marshal != string(pubKey.Marshal()) { + continue + } + } + sk.LastUsedTime = time.Now() + _ = op.UpdateSSHPublicKey(&sk) + return nil, nil + } + return nil, errors.New("public key refused") +} + +func (d *SftpDriver) AuthLogCallback(conn ssh.ConnMetadata, method string, err error) { + ip := conn.RemoteAddr().String() + if err == nil { + utils.Log.Infof("[SFTP] %s(%s) logged in via %s", conn.User(), ip, method) + } else if method != "none" { + utils.Log.Infof("[SFTP] %s(%s) tries logging in via %s but with error: %s", conn.User(), ip, method, err) + } +} + +func (d *SftpDriver) GetBanner(_ ssh.ConnMetadata) string { + return setting.GetStr(conf.Announcement) +} diff --git a/server/sftp/const.go b/server/sftp/const.go new file mode 100644 index 00000000..58bfe382 --- /dev/null +++ b/server/sftp/const.go @@ -0,0 +1,11 @@ +package sftp + +// From leffss/sftpd +const ( + SSH_FXF_READ = 0x00000001 + SSH_FXF_WRITE = 0x00000002 + SSH_FXF_APPEND = 0x00000004 + SSH_FXF_CREAT = 0x00000008 + SSH_FXF_TRUNC = 0x00000010 + SSH_FXF_EXCL = 0x00000020 +) diff --git a/server/sftp/hostkey.go b/server/sftp/hostkey.go new file mode 100644 index 00000000..0db103dd --- /dev/null +++ b/server/sftp/hostkey.go @@ -0,0 +1,105 @@ +package sftp + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "github.com/alist-org/alist/v3/cmd/flags" + "github.com/alist-org/alist/v3/pkg/utils" + "golang.org/x/crypto/ssh" + "os" + "path/filepath" +) + +var SSHSigners []ssh.Signer + +func InitHostKey() { + if SSHSigners != nil { + return + } + sshPath := filepath.Join(flags.DataDir, "ssh") + if !utils.Exists(sshPath) { + err := utils.CreateNestedDirectory(sshPath) + if err != nil { + utils.Log.Fatalf("failed to create ssh directory: %+v", err) + return + } + } + SSHSigners = make([]ssh.Signer, 0, 4) + if rsaKey, ok := LoadOrGenerateRSAHostKey(sshPath); ok { + SSHSigners = append(SSHSigners, rsaKey) + } + // TODO Add keys for other encryption algorithms +} + +func LoadOrGenerateRSAHostKey(parentDir string) (ssh.Signer, bool) { + privateKeyPath := filepath.Join(parentDir, "ssh_host_rsa_key") + publicKeyPath := filepath.Join(parentDir, "ssh_host_rsa_key.pub") + privateKeyBytes, err := os.ReadFile(privateKeyPath) + if err == nil { + var privateKey *rsa.PrivateKey + privateKey, err = rsaDecodePrivateKey(privateKeyBytes) + if err == nil { + var ret ssh.Signer + ret, err = ssh.NewSignerFromKey(privateKey) + if err == nil { + return ret, true + } + } + } + _ = os.Remove(privateKeyPath) + _ = os.Remove(publicKeyPath) + privateKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + utils.Log.Fatalf("failed to 
generate RSA private key: %+v", err) + return nil, false + } + publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + utils.Log.Fatalf("failed to generate RSA public key: %+v", err) + return nil, false + } + ret, err := ssh.NewSignerFromKey(privateKey) + if err != nil { + utils.Log.Fatalf("failed to generate RSA signer: %+v", err) + return nil, false + } + privateBytes := rsaEncodePrivateKey(privateKey) + publicBytes := ssh.MarshalAuthorizedKey(publicKey) + err = os.WriteFile(privateKeyPath, privateBytes, 0600) + if err != nil { + utils.Log.Fatalf("failed to write RSA private key to file: %+v", err) + return nil, false + } + err = os.WriteFile(publicKeyPath, publicBytes, 0644) + if err != nil { + _ = os.Remove(privateKeyPath) + utils.Log.Fatalf("failed to write RSA public key to file: %+v", err) + return nil, false + } + return ret, true +} + +func rsaEncodePrivateKey(privateKey *rsa.PrivateKey) []byte { + privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey) + privateBlock := &pem.Block{ + Type: "RSA PRIVATE KEY", + Headers: nil, + Bytes: privateKeyBytes, + } + return pem.EncodeToMemory(privateBlock) +} + +func rsaDecodePrivateKey(bytes []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(bytes) + if block == nil { + return nil, fmt.Errorf("failed to parse PEM block containing the key") + } + privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + return privateKey, nil +} diff --git a/server/sftp/sftp.go b/server/sftp/sftp.go new file mode 100644 index 00000000..1ceb3f59 --- /dev/null +++ b/server/sftp/sftp.go @@ -0,0 +1,123 @@ +package sftp + +import ( + "github.com/KirCute/sftpd-alist" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/alist-org/alist/v3/server/ftp" + "os" +) + +type DriverAdapter struct { + FtpDriver *ftp.AferoAdapter +} + +func (s *DriverAdapter) OpenFile(_ string, _ uint32, _ *sftpd.Attr) (sftpd.File, error) { + // See also GetHandle + return nil, errs.NotImplement +} + +func (s *DriverAdapter) OpenDir(_ string) (sftpd.Dir, error) { + // See also GetHandle + return nil, errs.NotImplement +} + +func (s *DriverAdapter) Remove(name string) error { + return s.FtpDriver.Remove(name) +} + +func (s *DriverAdapter) Rename(old, new string, _ uint32) error { + return s.FtpDriver.Rename(old, new) +} + +func (s *DriverAdapter) Mkdir(name string, attr *sftpd.Attr) error { + return s.FtpDriver.Mkdir(name, attr.Mode) +} + +func (s *DriverAdapter) Rmdir(name string) error { + return s.Remove(name) +} + +func (s *DriverAdapter) Stat(name string, _ bool) (*sftpd.Attr, error) { + stat, err := s.FtpDriver.Stat(name) + if err != nil { + return nil, err + } + return fileInfoToSftpAttr(stat), nil +} + +func (s *DriverAdapter) SetStat(_ string, _ *sftpd.Attr) error { + return errs.NotSupport +} + +func (s *DriverAdapter) ReadLink(_ string) (string, error) { + return "", errs.NotSupport +} + +func (s *DriverAdapter) CreateLink(_, _ string, _ uint32) error { + return errs.NotSupport +} + +func (s *DriverAdapter) RealPath(path string) (string, error) { + return utils.FixAndCleanPath(path), nil +} + +func (s *DriverAdapter) GetHandle(name string, flags uint32, _ *sftpd.Attr, offset uint64) (sftpd.FileTransfer, error) { + return s.FtpDriver.GetHandle(name, sftpFlagToOpenMode(flags), int64(offset)) +} + +func (s *DriverAdapter) ReadDir(name string) ([]sftpd.NamedAttr, error) { + dir, err := s.FtpDriver.ReadDir(name) 
+	if err != nil {
+		return nil, err
+	}
+	ret := make([]sftpd.NamedAttr, len(dir))
+	for i, d := range dir {
+		ret[i] = *fileInfoToSftpNamedAttr(d)
+	}
+	return ret, nil
+}
+
+// From leffss/sftpd
+func sftpFlagToOpenMode(flags uint32) int {
+	mode := 0
+	if (flags & SSH_FXF_READ) != 0 {
+		mode |= os.O_RDONLY
+	}
+	if (flags & SSH_FXF_WRITE) != 0 {
+		mode |= os.O_WRONLY
+	}
+	if (flags & SSH_FXF_APPEND) != 0 {
+		mode |= os.O_APPEND
+	}
+	if (flags & SSH_FXF_CREAT) != 0 {
+		mode |= os.O_CREATE
+	}
+	if (flags & SSH_FXF_TRUNC) != 0 {
+		mode |= os.O_TRUNC
+	}
+	if (flags & SSH_FXF_EXCL) != 0 {
+		mode |= os.O_EXCL
+	}
+	return mode
+}
+
+func fileInfoToSftpAttr(stat os.FileInfo) *sftpd.Attr {
+	ret := &sftpd.Attr{}
+	ret.Flags |= sftpd.ATTR_SIZE
+	ret.Size = uint64(stat.Size())
+	ret.Flags |= sftpd.ATTR_MODE
+	ret.Mode = stat.Mode()
+	ret.Flags |= sftpd.ATTR_TIME
+	ret.ATime = stat.Sys().(model.Obj).CreateTime()
+	ret.MTime = stat.ModTime()
+	return ret
+}
+
+func fileInfoToSftpNamedAttr(stat os.FileInfo) *sftpd.NamedAttr {
+	return &sftpd.NamedAttr{
+		Name: stat.Name(),
+		Attr: *fileInfoToSftpAttr(stat),
+	}
+}
diff --git a/server/static/static.go b/server/static/static.go
index ec16014c..d5d6ff68 100644
--- a/server/static/static.go
+++ b/server/static/static.go
@@ -102,6 +102,10 @@ func Static(r *gin.RouterGroup, noRoute func(handlers ...gin.HandlerFunc)) {
 	}
 
 	noRoute(func(c *gin.Context) {
+		if c.Request.Method != "GET" && c.Request.Method != "POST" {
+			c.Status(405)
+			return
+		}
 		c.Header("Content-Type", "text/html")
 		c.Status(200)
 		if strings.HasPrefix(c.Request.URL.Path, "/@manage") {
diff --git a/server/webdav.go b/server/webdav.go
index 2b5c9618..a735e285 100644
--- a/server/webdav.go
+++ b/server/webdav.go
@@ -3,6 +3,8 @@ package server
 import (
 	"context"
 	"crypto/subtle"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"github.com/alist-org/alist/v3/server/middlewares"
 	"net/http"
 	"path"
 	"strings"
@@ -11,7 +13,6 @@ import (
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/internal/setting"
-	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/server/webdav"
 	"github.com/gin-gonic/gin"
 	log "github.com/sirupsen/logrus"
@@ -28,8 +29,10 @@ func WebDav(dav *gin.RouterGroup) {
 		},
 	}
 	dav.Use(WebDAVAuth)
-	dav.Any("/*path", ServeWebDAV)
-	dav.Any("", ServeWebDAV)
+	uploadLimiter := middlewares.UploadRateLimiter(stream.ClientUploadLimit)
+	downloadLimiter := middlewares.DownloadRateLimiter(stream.ClientDownloadLimit)
+	dav.Any("/*path", uploadLimiter, downloadLimiter, ServeWebDAV)
+	dav.Any("", uploadLimiter, downloadLimiter, ServeWebDAV)
 	dav.Handle("PROPFIND", "/*path", ServeWebDAV)
 	dav.Handle("PROPFIND", "", ServeWebDAV)
 	dav.Handle("MKCOL", "/*path", ServeWebDAV)
@@ -99,12 +102,27 @@ func WebDAVAuth(c *gin.Context) {
 		c.Abort()
 		return
 	}
-	if !user.CanWebdavManage() && utils.SliceContains([]string{"PUT", "DELETE", "PROPPATCH", "MKCOL", "COPY", "MOVE"}, c.Request.Method) {
-		if c.Request.Method == "OPTIONS" {
-			c.Set("user", guest)
-			c.Next()
-			return
-		}
+	if (c.Request.Method == "PUT" || c.Request.Method == "MKCOL") && (!user.CanWebdavManage() || !user.CanWrite()) {
+		c.Status(http.StatusForbidden)
+		c.Abort()
+		return
+	}
+	if c.Request.Method == "MOVE" && (!user.CanWebdavManage() || (!user.CanMove() && !user.CanRename())) {
+		c.Status(http.StatusForbidden)
+		c.Abort()
+		return
+	}
+	if c.Request.Method == "COPY" && (!user.CanWebdavManage() || !user.CanCopy()) {
+		c.Status(http.StatusForbidden)
+		c.Abort()
+		return
+	}
+	if c.Request.Method == "DELETE" && (!user.CanWebdavManage() || !user.CanRemove()) {
+		c.Status(http.StatusForbidden)
+		c.Abort()
+		return
+	}
+	if c.Request.Method == "PROPPATCH" && !user.CanWebdavManage() {
 		c.Status(http.StatusForbidden)
 		c.Abort()
 		return
diff --git a/server/webdav/file.go b/server/webdav/file.go
index 01e96f7d..ac8f5c1c 100644
--- a/server/webdav/file.go
+++ b/server/webdav/file.go
@@ -33,6 +33,13 @@ func moveFiles(ctx context.Context, src, dst string, overwrite bool) (status int
 	dstDir := path.Dir(dst)
 	srcName := path.Base(src)
 	dstName := path.Base(dst)
+	user := ctx.Value("user").(*model.User)
+	if srcDir != dstDir && !user.CanMove() {
+		return http.StatusForbidden, nil
+	}
+	if srcName != dstName && !user.CanRename() {
+		return http.StatusForbidden, nil
+	}
 	if srcDir == dstDir {
 		err = fs.Rename(ctx, src, dstName)
 	} else {
diff --git a/server/webdav/prop.go b/server/webdav/prop.go
index b1474ea3..a81f31b0 100644
--- a/server/webdav/prop.go
+++ b/server/webdav/prop.go
@@ -18,6 +18,7 @@ import (
 	"time"
 
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/server/common"
 )
 
 // Proppatch describes a property update instruction as defined in RFC 4918.
@@ -101,7 +102,7 @@ type DeadPropsHolder interface {
 	Patch([]Proppatch) ([]Propstat, error)
 }
 
-// liveProps contains all supported, protected DAV: properties.
+// liveProps contains all supported properties.
 var liveProps = map[xml.Name]struct {
 	// findFn implements the propfind function of this property. If nil,
 	// it indicates a hidden property.
@@ -160,6 +161,10 @@ var liveProps = map[xml.Name]struct {
 		findFn: findSupportedLock,
 		dir:    true,
 	},
+	{Space: "http://owncloud.org/ns", Local: "checksums"}: {
+		findFn: findChecksums,
+		dir:    false,
+	},
 }
 
 // TODO(nigeltao) merge props and allprop?
@@ -473,7 +478,7 @@ func findETag(ctx context.Context, ls LockSystem, name string, fi model.Obj) (st
 	// The Apache http 2.4 web server by default concatenates the
 	// modification time and size of a file. We replicate the heuristic
 	// with nanosecond granularity.
-	return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.GetSize()), nil
+	return common.GetEtag(fi), nil
 }
 
 func findSupportedLock(ctx context.Context, ls LockSystem, name string, fi model.Obj) (string, error) {
@@ -483,3 +488,11 @@ func findSupportedLock(ctx context.Context, ls LockSystem, name string, fi model
 		`<D:locktype><D:write/></D:locktype>` +
 		`</D:lockentry>`, nil
 }
+
+func findChecksums(ctx context.Context, ls LockSystem, name string, fi model.Obj) (string, error) {
+	checksums := ""
+	for hashType, hashValue := range fi.GetHash().All() {
+		checksums += fmt.Sprintf("%s:%s", hashType.Name, hashValue)
+	}
+	return checksums, nil
+}
diff --git a/server/webdav/webdav.go b/server/webdav/webdav.go
index b84e65b0..f22e15aa 100644
--- a/server/webdav/webdav.go
+++ b/server/webdav/webdav.go
@@ -24,7 +24,6 @@ import (
 	"github.com/alist-org/alist/v3/internal/sign"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/server/common"
-	log "github.com/sirupsen/logrus"
 )
 
 type Handler struct {
@@ -59,7 +58,11 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		status, err = h.handleOptions(brw, r)
 	case "GET", "HEAD", "POST":
 		useBufferedWriter = false
-		status, err = h.handleGetHeadPost(w, r)
+		Writer := &common.WrittenResponseWriter{ResponseWriter: w}
+		status, err = h.handleGetHeadPost(Writer, r)
+		if status != 0 && Writer.IsWritten() {
+			status = 0
+		}
 	case "DELETE":
 		status, err = h.handleDelete(brw, r)
 	case "PUT":
@@ -227,11 +230,6 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
 	if err != nil {
 		return http.StatusNotFound, err
 	}
-	etag, err := findETag(ctx, h.LockSystem, reqPath, fi)
-	if err != nil {
-		return http.StatusInternalServerError, err
-	}
-	w.Header().Set("ETag", etag)
 	if r.Method == http.MethodHead {
 		w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.GetSize()))
 		return http.StatusOK, nil
@@ -252,8 +250,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
 		}
 		err = common.Proxy(w, r, link, fi)
 		if err != nil {
-			log.Errorf("webdav proxy error: %+v", err)
-			return http.StatusInternalServerError, err
+			return http.StatusInternalServerError, fmt.Errorf("webdav proxy error: %+v", err)
 		}
 	} else if storage.GetStorage().WebdavProxy() && downProxyUrl != "" {
 		u := fmt.Sprintf("%s%s?sign=%s",
@@ -263,7 +260,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
 		w.Header().Set("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate")
 		http.Redirect(w, r, u, http.StatusFound)
 	} else {
-		link, _, err := fs.Link(ctx, reqPath, model.LinkArgs{IP: utils.ClientIP(r), Header: r.Header, HttpReq: r})
+		link, _, err := fs.Link(ctx, reqPath, model.LinkArgs{IP: utils.ClientIP(r), Header: r.Header, HttpReq: r, Redirect: true})
 		if err != nil {
 			return http.StatusInternalServerError, err
 		}
@@ -361,7 +358,7 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
 	if err != nil {
 		return http.StatusInternalServerError, err
 	}
-	w.Header().Set("ETag", etag)
+	w.Header().Set("Etag", etag)
 	return http.StatusCreated, nil
 }
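
The new `UploadRateLimiter` middleware threads a shared client-side limit through `/fs/put`, `/fs/form`, and the WebDAV handlers. As a rough illustration of the idea (not alist's actual `middlewares` implementation), a gin middleware can throttle uploads by wrapping the request body in a reader gated by a `golang.org/x/time/rate` token bucket:

```go
package main

import (
	"context"
	"io"
	"net/http"

	"github.com/gin-gonic/gin"
	"golang.org/x/time/rate"
)

// limitedReader blocks on a token bucket before handing bytes upstream.
type limitedReader struct {
	r   io.ReadCloser
	lim *rate.Limiter
	ctx context.Context
}

func (l *limitedReader) Read(p []byte) (int, error) {
	// Cap each read below the limiter burst so WaitN can always succeed.
	if len(p) > 32<<10 {
		p = p[:32<<10]
	}
	n, err := l.r.Read(p)
	if n > 0 {
		if waitErr := l.lim.WaitN(l.ctx, n); waitErr != nil {
			return n, waitErr
		}
	}
	return n, err
}

func (l *limitedReader) Close() error { return l.r.Close() }

// UploadRateLimiter is a no-op when limiter is nil, mirroring a disabled limit.
func UploadRateLimiter(limiter *rate.Limiter) gin.HandlerFunc {
	return func(c *gin.Context) {
		if limiter != nil {
			c.Request.Body = &limitedReader{r: c.Request.Body, lim: limiter, ctx: c.Request.Context()}
		}
		c.Next()
	}
}

func main() {
	r := gin.New()
	// Cap uploads at ~1 MiB/s with a 64 KiB burst (illustrative numbers).
	limit := rate.NewLimiter(rate.Limit(1<<20), 64<<10)
	r.PUT("/fs/put", UploadRateLimiter(limit), func(c *gin.Context) {
		n, _ := io.Copy(io.Discard, c.Request.Body)
		c.String(http.StatusOK, "received %d bytes", n)
	})
	_ = r.Run(":8080")
}
```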
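
The rewritten s3 `GetObject` resolves the requested byte range up front and then picks one of two read paths: seek a local `MFile` handle, or issue a ranged remote read. A minimal sketch of that dispatch, using stand-in types (`rangeReadFunc` here is hypothetical, not alist's API); `length < 0` means "to the end", as in the diff:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

type rangeReadFunc func(ctx context.Context, start, length int64) (io.ReadCloser, error)

// openRange prefers a seekable local handle and falls back to a ranged remote read.
func openRange(ctx context.Context, seekable io.ReadSeeker, remote rangeReadFunc, start, length int64) (io.Reader, error) {
	if seekable != nil {
		if _, err := seekable.Seek(start, io.SeekStart); err != nil {
			return nil, err
		}
		if length < 0 {
			return seekable, nil // read to EOF
		}
		return io.LimitReader(seekable, length), nil
	}
	if remote != nil {
		return remote(ctx, start, length)
	}
	return nil, fmt.Errorf("no usable read path")
}

func main() {
	src := strings.NewReader("0123456789")
	r, err := openRange(context.Background(), src, nil, 2, 4)
	if err != nil {
		panic(err)
	}
	b, _ := io.ReadAll(r)
	fmt.Println(string(b)) // "2345"
}
```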
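
`PublicKeyAuth` compares the wire-format key the client presented against stored `authorized_keys` strings, falling back to parse-then-marshal when the literal compare fails. The round trip below shows why the parse step is needed (stored keys are text, presented keys are wire format); it generates its own key so it is self-contained:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// keyMatches parses the stored authorized_keys line back to wire format
// and compares it with the key the client presented.
func keyMatches(presented ssh.PublicKey, storedAuthorizedKey string) bool {
	parsed, _, _, _, err := ssh.ParseAuthorizedKey([]byte(storedAuthorizedKey))
	if err != nil {
		return false
	}
	return string(presented.Marshal()) == string(parsed.Marshal())
}

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	sshPub, err := ssh.NewPublicKey(pub)
	if err != nil {
		panic(err)
	}
	stored := string(ssh.MarshalAuthorizedKey(sshPub)) // what the database would hold
	fmt.Println(keyMatches(sshPub, stored))            // true
}
```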
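
`LoadOrGenerateRSAHostKey` persists the host key as a PKCS#1 `RSA PRIVATE KEY` PEM block and rebuilds an `ssh.Signer` from it on the next start. A self-contained round trip of that encode/decode path (2048-bit here only to keep the demo fast; the diff uses 4096):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// Encode the same way hostkey.go does: PKCS#1 in an "RSA PRIVATE KEY" block.
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	// Decode and rebuild the signer, as on a subsequent startup.
	block, _ := pem.Decode(pemBytes)
	decoded, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		panic(err)
	}
	signer, err := ssh.NewSignerFromKey(decoded)
	if err != nil {
		panic(err)
	}
	fmt.Println("host key type:", signer.PublicKey().Type()) // "ssh-rsa"
}
```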
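
One subtlety in `sftpFlagToOpenMode`, inherited from leffss/sftpd: `os.O_RDONLY` is zero, so OR-ing it in is a no-op, and `READ|WRITE` maps to `os.O_WRONLY` rather than `os.O_RDWR`. The snippet below demonstrates the behavior so downstream handlers can account for it:

```go
package main

import (
	"fmt"
	"os"
)

const (
	SSH_FXF_READ  = 0x00000001
	SSH_FXF_WRITE = 0x00000002
)

// Trimmed to the two flags that exhibit the quirk.
func sftpFlagToOpenMode(flags uint32) int {
	mode := 0
	if (flags & SSH_FXF_READ) != 0 {
		mode |= os.O_RDONLY // no-op: O_RDONLY == 0
	}
	if (flags & SSH_FXF_WRITE) != 0 {
		mode |= os.O_WRONLY
	}
	return mode
}

func main() {
	fmt.Println(sftpFlagToOpenMode(SSH_FXF_READ) == 0)                         // true
	fmt.Println(sftpFlagToOpenMode(SSH_FXF_READ|SSH_FXF_WRITE) == os.O_WRONLY) // true
	fmt.Println(os.O_RDWR == os.O_WRONLY)                                      // false
}
```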
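
The per-method checks that replace the old `utils.SliceContains` blacklist in `WebDAVAuth` can be read as a method-to-permission table. The standalone predicate below restates them with a cut-down stand-in for `model.User` (plain bools instead of methods):

```go
package main

import "fmt"

type User struct {
	CanWebdavManage, CanWrite, CanMove, CanRename, CanCopy, CanRemove bool
}

// forbidden reports whether the new WebDAVAuth checks would reject method for u.
func forbidden(u User, method string) bool {
	if !u.CanWebdavManage {
		switch method {
		case "PUT", "MKCOL", "MOVE", "COPY", "DELETE", "PROPPATCH":
			return true
		}
		return false
	}
	switch method {
	case "PUT", "MKCOL":
		return !u.CanWrite
	case "MOVE":
		return !u.CanMove && !u.CanRename
	case "COPY":
		return !u.CanCopy
	case "DELETE":
		return !u.CanRemove
	}
	return false
}

func main() {
	readOnly := User{CanWebdavManage: true} // manage bit set, no write bits
	fmt.Println(forbidden(readOnly, "PUT"))      // true
	fmt.Println(forbidden(readOnly, "PROPFIND")) // false
}
```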
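
`moveFiles` now checks the two permissions independently: changing the parent directory needs `CanMove`, changing the base name needs `CanRename`, and a MOVE that does both needs both. Restated in isolation:

```go
package main

import (
	"fmt"
	"path"
)

// required reports which permissions a WebDAV MOVE from src to dst needs,
// mirroring the two independent checks in moveFiles.
func required(src, dst string) (needsMove, needsRename bool) {
	return path.Dir(src) != path.Dir(dst), path.Base(src) != path.Base(dst)
}

func main() {
	fmt.Println(required("/a/x.txt", "/a/y.txt")) // false true  (rename only)
	fmt.Println(required("/a/x.txt", "/b/x.txt")) // true false  (move only)
	fmt.Println(required("/a/x.txt", "/b/y.txt")) // true true   (both)
}
```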
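
Wrapping the GET/HEAD/POST path in `common.WrittenResponseWriter` lets `ServeHTTP` avoid sending a second status code once any bytes have already gone out. A sketch of what such a written-tracking wrapper might look like (the real type lives in `server/common`; this is an assumption about its shape, not a copy of it):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// writtenResponseWriter records whether a status or body was ever emitted.
type writtenResponseWriter struct {
	http.ResponseWriter
	written bool
}

func (w *writtenResponseWriter) Write(b []byte) (int, error) {
	w.written = true
	return w.ResponseWriter.Write(b)
}

func (w *writtenResponseWriter) WriteHeader(statusCode int) {
	w.written = true
	w.ResponseWriter.WriteHeader(statusCode)
}

func (w *writtenResponseWriter) IsWritten() bool { return w.written }

func main() {
	rec := httptest.NewRecorder()
	w := &writtenResponseWriter{ResponseWriter: rec}
	fmt.Println(w.IsWritten()) // false
	fmt.Fprint(w, "partial body")
	fmt.Println(w.IsWritten()) // true: a later error must not write another status
}
```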