Compare commits

..

7 Commits

Author    SHA1        Message                                                     Date
mertalev  5ebb8e1a33  use typeorm entities for kysely types                       2025-01-02 18:20:13 -05:00
mertalev  88bf0615fe  add count option to withStack                               2025-01-02 14:52:09 -05:00
mertalev  c91a139eeb  fixes for sync queries                                      2025-01-02 14:52:09 -05:00
mertalev  7ed62d4149  unnecessary parentheses                                     2025-01-02 14:52:09 -05:00
mertalev  aa7f486259  closure begone                                              2025-01-02 14:52:09 -05:00
mertalev  4731f271e2  use min year for memory time series, sql generation fixes   2025-01-02 14:52:09 -05:00
mertalev  38a82d39d3  wip                                                         2025-01-02 14:52:09 -05:00
1311 changed files with 27948 additions and 70198 deletions


@@ -1,2 +0,0 @@
.env
library


@@ -1,16 +1,2 @@
ARG BASEIMAGE=mcr.microsoft.com/devcontainers/typescript-node:22@sha256:9791f4aa527774bc370c6bd2f6705ce5a686f1e6f204badd8dfaacce28c631ae
FROM ${BASEIMAGE}
# Flutter SDK
# https://flutter.dev/docs/development/tools/sdk/releases?tab=linux
ENV FLUTTER_CHANNEL="stable"
ENV FLUTTER_VERSION="3.24.5"
ENV FLUTTER_HOME=/flutter
ENV PATH=${PATH}:${FLUTTER_HOME}/bin
# Flutter SDK
RUN mkdir -p ${FLUTTER_HOME} \
&& curl -C - --output flutter.tar.xz https://storage.googleapis.com/flutter_infra_release/releases/${FLUTTER_CHANNEL}/linux/flutter_linux_${FLUTTER_VERSION}-${FLUTTER_CHANNEL}.tar.xz \
&& tar -xf flutter.tar.xz --strip-components=1 -C ${FLUTTER_HOME} \
&& rm flutter.tar.xz \
&& chown -R 1000:1000 ${FLUTTER_HOME}


@@ -1,26 +1,20 @@
{
"name": "Immich",
"service": "immich-devcontainer",
"dockerComposeFile": [
"docker-compose.yml",
"../docker/docker-compose.dev.yml"
],
"customizations": {
"vscode": {
"extensions": [
"Dart-Code.dart-code",
"Dart-Code.flutter",
"dbaeumer.vscode-eslint",
"dcmdev.dcm-vscode-extension",
"esbenp.prettier-vscode",
"svelte.svelte-vscode"
]
}
},
"forwardPorts": [],
"initializeCommand": "bash .devcontainer/scripts/initializeCommand.sh",
"onCreateCommand": "bash .devcontainer/scripts/onCreateCommand.sh",
"overrideCommand": true,
"workspaceFolder": "/immich",
"remoteUser": "node"
"name": "Immich devcontainers",
"build": {
"dockerfile": "Dockerfile",
"args": {
"BASEIMAGE": "mcr.microsoft.com/devcontainers/typescript-node:22"
}
},
"customizations": {
"vscode": {
"extensions": [
"svelte.svelte-vscode"
]
}
},
"forwardPorts": [],
"postCreateCommand": "make install-all",
"remoteUser": "node"
}


@@ -1,8 +0,0 @@
services:
immich-devcontainer:
build:
dockerfile: Dockerfile
extra_hosts:
- 'host.docker.internal:host-gateway'
volumes:
- ..:/immich:cached


@@ -1,6 +0,0 @@
#!/bin/bash
# If .env file does not exist, create it by copying example.env from the docker folder
if [ ! -f ".devcontainer/.env" ]; then
cp docker/example.env .devcontainer/.env
fi


@@ -1,25 +0,0 @@
#!/bin/bash
# Enable multiarch for arm64 if necessary
if [ "$(dpkg --print-architecture)" = "arm64" ]; then
sudo dpkg --add-architecture amd64 && \
sudo apt-get update && \
sudo apt-get install -y --no-install-recommends \
qemu-user-static \
libc6:amd64 \
libstdc++6:amd64 \
libgcc1:amd64
fi
# Install DCM
wget -qO- https://dcm.dev/pgp-key.public | sudo gpg --dearmor -o /usr/share/keyrings/dcm.gpg
sudo echo 'deb [signed-by=/usr/share/keyrings/dcm.gpg arch=amd64] https://dcm.dev/debian stable main' | sudo tee /etc/apt/sources.list.d/dart_stable.list
sudo apt-get update
sudo apt-get install dcm
dart --disable-analytics
# Install immich
cd /immich || exit
make install-all


@@ -11,7 +11,7 @@ body:
- type: checkboxes
attributes:
label: I have searched the existing feature requests, both open and closed, to make sure this is not a duplicate request.
label: I have searched the existing feature requests to make sure this is not a duplicate request.
options:
- label: "Yes"
required: true

.github/FUNDING.yml vendored (2 lines changed)

@@ -1 +1 @@
custom: ['https://buy.immich.app', 'https://immich.store']
custom: ['https://buy.immich.app']


@@ -1,13 +1,6 @@
name: Report an issue with Immich
description: Report an issue with Immich
body:
- type: checkboxes
attributes:
label: I have searched the existing issues, both open and closed, to make sure this is not a duplicate report.
options:
- label: "Yes"
required: true
- type: markdown
attributes:
value: |


@@ -1 +1,2 @@
blank_issues_enabled: false
blank_pull_request_template_enabled: false


@@ -0,0 +1,22 @@
## Description
<!--- Describe your changes in detail -->
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->
Fixes # (issue)
## How Has This Been Tested?
<!-- Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration -->
- [ ] Test A
- [ ] Test B
## Screenshots (if appropriate):
## Checklist:
- [ ] I have performed a self-review of my own code
- [ ] I have made corresponding changes to the documentation if applicable


@@ -1,36 +0,0 @@
## Description
<!--- Describe your changes in detail -->
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->
Fixes # (issue)
## How Has This Been Tested?
<!-- Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration -->
- [ ] Test A
- [ ] Test B
<details><summary><h2>Screenshots (if appropriate)</h2></summary>
<!-- Images go below this line. -->
</details>
<!-- API endpoint changes (if relevant)
## API Changes
The `/api/something` endpoint is now `/api/something-else`
-->
## Checklist:
- [ ] I have performed a self-review of my own code
- [ ] I have made corresponding changes to the documentation if applicable
- [ ] I have no unrelated changes in the PR.
- [ ] I have confirmed that any new dependencies are strictly necessary.
- [ ] I have written tests for new code (if applicable)
- [ ] I have followed naming conventions/patterns in the surrounding code
- [ ] All code in `src/services` uses repositories implementations for database calls, filesystem operations, etc.
- [ ] All code in `src/repositories/` is pretty basic/simple and does not have any immich specific logic (that belongs in `src/services`)


@@ -29,11 +29,9 @@ jobs:
filters: |
mobile:
- 'mobile/**'
workflow:
- '.github/workflows/build-mobile.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
build-sign-android:
name: Build and sign Android


@@ -56,10 +56,10 @@ jobs:
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.5.0
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.10.0
uses: docker/setup-buildx-action@v3.8.0
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
@@ -88,7 +88,7 @@ jobs:
type=raw,value=latest,enable=${{ github.event_name == 'release' }}
- name: Build and push image
uses: docker/build-push-action@v6.15.0
uses: docker/build-push-action@v6.10.0
with:
file: cli/Dockerfile
platforms: linux/amd64,linux/arm64

.github/workflows/docker-cleanup.yml vendored, new file (73 lines changed)

@@ -0,0 +1,73 @@
# This workflow runs on certain conditions to check for and potentially
# delete container images from the GHCR which no longer have an associated
# code branch.
# Requires a PAT with the correct scope set in the secrets.
#
# This workflow will not trigger runs on forked repos.
name: Docker Cleanup
on:
pull_request:
types:
- "closed"
push:
paths:
- ".github/workflows/docker-cleanup.yml"
concurrency:
group: registry-tags-cleanup
cancel-in-progress: false
jobs:
cleanup-images:
name: Cleanup Stale Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
strategy:
fail-fast: false
matrix:
include:
- primary-name: "immich-server"
- primary-name: "immich-machine-learning"
env:
# Requires a personal access token with the OAuth scope delete:packages
TOKEN: ${{ secrets.PACKAGE_DELETE_TOKEN }}
steps:
- name: Clean temporary images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/ephemeral@v0.9.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
is_org: "true"
do_delete: "true"
package_name: "${{ matrix.primary-name }}"
scheme: "pull_request"
repo_name: "immich"
match_regex: '^pr-(\d+)$|^(\d+)$'
cleanup-untagged-images:
name: Cleanup Untagged Images Tags for ${{ matrix.primary-name }}
runs-on: ubuntu-24.04
needs:
- cleanup-images
strategy:
fail-fast: false
matrix:
include:
- primary-name: "immich-server"
- primary-name: "immich-machine-learning"
- primary-name: "immich-build-cache"
env:
# Requires a personal access token with the OAuth scope delete:packages
TOKEN: ${{ secrets.PACKAGE_DELETE_TOKEN }}
steps:
- name: Clean untagged images
if: "${{ env.TOKEN != '' }}"
uses: stumpylog/image-cleaner-action/untagged@v0.9.0
with:
token: "${{ env.TOKEN }}"
owner: "immich-app"
do_delete: "true"
is_org: "true"
package_name: "${{ matrix.primary-name }}"


@@ -5,6 +5,7 @@ on:
push:
branches: [main]
pull_request:
branches: [main]
release:
types: [published]
@@ -35,12 +36,10 @@ jobs:
- 'i18n/**'
machine-learning:
- 'machine-learning/**'
workflow:
- '.github/workflows/docker.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'workflow_dispatch' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
retag_ml:
name: Re-Tag ML
@@ -62,10 +61,8 @@ jobs:
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-machine-learning
TAG_OLD=main${{ matrix.suffix }}
TAG_PR=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
retag_server:
name: Re-Tag Server
@@ -87,100 +84,107 @@ jobs:
REGISTRY_NAME="ghcr.io"
REPOSITORY=${{ github.repository_owner }}/immich-server
TAG_OLD=main${{ matrix.suffix }}
TAG_PR=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
TAG_COMMIT=commit-${{ github.event_name != 'pull_request' && github.sha || github.event.pull_request.head.sha }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_PR $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_COMMIT $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
TAG_NEW=${{ github.event.number == 0 && github.ref_name || format('pr-{0}', github.event.number) }}${{ matrix.suffix }}
docker buildx imagetools create -t $REGISTRY_NAME/$REPOSITORY:$TAG_NEW $REGISTRY_NAME/$REPOSITORY:$TAG_OLD
build_and_push_ml:
name: Build and Push ML
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' }}
runs-on: ${{ matrix.runner }}
runs-on: ubuntu-latest
env:
image: immich-machine-learning
context: machine-learning
file: machine-learning/Dockerfile
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-machine-learning
strategy:
# Prevent a failure in one image from stopping the other builds
fail-fast: false
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
- platforms: linux/amd64,linux/arm64
device: cpu
- platform: linux/arm64
runner: ubuntu-24.04-arm
device: cpu
- platform: linux/amd64
runner: ubuntu-latest
- platforms: linux/amd64
device: cuda
suffix: -cuda
- platform: linux/amd64
runner: ubuntu-latest
- platforms: linux/amd64
device: openvino
suffix: -openvino
- platform: linux/arm64
runner: ubuntu-24.04-arm
- platforms: linux/arm64
device: armnn
suffix: -armnn
steps:
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3.10.0
uses: docker/setup-buildx-action@v3.8.0
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
# Skip when PR from a fork
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate cache key suffix
run: |
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
echo "CACHE_KEY_SUFFIX=pr-${{ github.event.number }}" >> $GITHUB_ENV
else
echo "CACHE_KEY_SUFFIX=$(echo ${{ github.ref_name }} | sed 's/[^a-zA-Z0-9]/-/g')" >> $GITHUB_ENV
fi
- name: Generate docker image tags
id: metadata
uses: docker/metadata-action@v5
with:
flavor: |
# Disable latest tag
latest=false
images: |
name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch,suffix=${{ matrix.suffix }}
# Tag with pr-number
type=ref,event=pr,suffix=${{ matrix.suffix }}
# Tag with git tag on release
type=ref,event=tag,suffix=${{ matrix.suffix }}
type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}
- name: Generate cache target
- name: Determine build cache output
id: cache-target
run: |
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
# Essentially just ignore the cache output (forks can't write to registry cache)
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
# Essentially just ignore the cache output (PR can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }},mode=max,compression=zstd" >> $GITHUB_OUTPUT
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
fi
- name: Build and push image
id: build
uses: docker/build-push-action@v6.15.0
uses: docker/build-push-action@v6.10.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
platforms: ${{ matrix.platforms }}
labels: ${{ steps.metadata.outputs.labels }}
# Skip pushing when PR from a fork
push: ${{ !github.event.pull_request.head.repo.fork }}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
cache-from: |
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }}
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-main
outputs: type=image,"name=${{ env.GHCR_REPO }}",push-by-digest=true,name-canonical=true,push=${{ !github.event.pull_request.head.repo.fork }}
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
build-args: |
DEVICE=${{ matrix.device }}
BUILD_ID=${{ github.run_id }}
@@ -188,249 +192,100 @@ jobs:
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests
digest="${{ steps.build.outputs.digest }}"
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: ml-digests-${{ matrix.device }}-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
merge_ml:
name: Merge & Push ML
runs-on: ubuntu-latest
if: ${{ needs.pre-job.outputs.should_run_ml == 'true' && !github.event.pull_request.head.repo.fork }}
env:
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-machine-learning
DOCKER_REPO: altran1502/immich-machine-learning
strategy:
matrix:
include:
- device: cpu
- device: cuda
suffix: -cuda
- device: openvino
suffix: -openvino
- device: armnn
suffix: -armnn
needs:
- build_and_push_ml
steps:
- name: Download digests
uses: actions/download-artifact@v4
with:
path: ${{ runner.temp }}/digests
pattern: ml-digests-${{ matrix.device }}-*
merge-multiple: true
- name: Login to Docker Hub
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Generate docker image tags
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_PR_HEAD_SHA: "true"
with:
flavor: |
# Disable latest tag
latest=false
images: |
name=${{ env.GHCR_REPO }}
name=${{ env.DOCKER_REPO }},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch,suffix=${{ matrix.suffix }}
# Tag with pr-number
type=ref,event=pr,suffix=${{ matrix.suffix }}
# Tag with long commit sha hash
type=sha,format=long,prefix=commit-,suffix=${{ matrix.suffix }}
# Tag with git tag on release
type=ref,event=tag,suffix=${{ matrix.suffix }}
type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}
- name: Create manifest list and push
working-directory: ${{ runner.temp }}/digests
run: |
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.GHCR_REPO }}@sha256:%s ' *)
build_and_push_server:
name: Build and Push Server
runs-on: ${{ matrix.runner }}
runs-on: ubuntu-latest
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
env:
image: immich-server
context: .
file: server/Dockerfile
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-server
strategy:
fail-fast: false
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
- platform: linux/arm64
runner: ubuntu-24.04-arm
- platforms: linux/amd64,linux/arm64
device: cpu
steps:
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.2.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate cache key suffix
run: |
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
echo "CACHE_KEY_SUFFIX=pr-${{ github.event.number }}" >> $GITHUB_ENV
else
echo "CACHE_KEY_SUFFIX=$(echo ${{ github.ref_name }} | sed 's/[^a-zA-Z0-9]/-/g')" >> $GITHUB_ENV
fi
- name: Generate cache target
id: cache-target
run: |
if [[ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]]; then
# Essentially just ignore the cache output (forks can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ matrix.device }}-${{ env.CACHE_KEY_SUFFIX }},mode=max,compression=zstd" >> $GITHUB_OUTPUT
fi
- name: Build and push image
id: build
uses: docker/build-push-action@v6.15.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
platforms: ${{ matrix.platform }}
labels: ${{ steps.metadata.outputs.labels }}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
cache-from: |
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-${{ env.CACHE_KEY_SUFFIX }}
type=registry,ref=${{ env.GHCR_REPO }}-build-cache:${{ env.PLATFORM_PAIR }}-main
outputs: type=image,"name=${{ env.GHCR_REPO }}",push-by-digest=true,name-canonical=true,push=${{ !github.event.pull_request.head.repo.fork }}
build-args: |
DEVICE=cpu
BUILD_ID=${{ github.run_id }}
BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
- name: Export digest
run: |
mkdir -p ${{ runner.temp }}/digests
digest="${{ steps.build.outputs.digest }}"
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: server-digests-${{ env.PLATFORM_PAIR }}
path: ${{ runner.temp }}/digests/*
if-no-files-found: error
retention-days: 1
merge_server:
name: Merge & Push Server
runs-on: ubuntu-latest
if: ${{ needs.pre-job.outputs.should_run_server == 'true' && !github.event.pull_request.head.repo.fork }}
env:
GHCR_REPO: ghcr.io/${{ github.repository_owner }}/immich-server
DOCKER_REPO: altran1502/immich-server
needs:
- build_and_push_server
steps:
- name: Download digests
uses: actions/download-artifact@v4
with:
path: ${{ runner.temp }}/digests
pattern: server-digests-*
merge-multiple: true
uses: docker/setup-buildx-action@v3.8.0
- name: Login to Docker Hub
# Only push to Docker Hub when making a release
if: ${{ github.event_name == 'release' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
# Skip when PR from a fork
if: ${{ !github.event.pull_request.head.repo.fork }}
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Generate docker image tags
id: meta
id: metadata
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_PR_HEAD_SHA: "true"
with:
flavor: |
# Disable latest tag
latest=false
images: |
name=${{ env.GHCR_REPO }}
name=${{ env.DOCKER_REPO }},enable=${{ github.event_name == 'release' }}
name=ghcr.io/${{ github.repository_owner }}/${{env.image}}
name=altran1502/${{env.image}},enable=${{ github.event_name == 'release' }}
tags: |
# Tag with branch name
type=ref,event=branch,suffix=${{ matrix.suffix }}
# Tag with pr-number
type=ref,event=pr,suffix=${{ matrix.suffix }}
# Tag with long commit sha hash
type=sha,format=long,prefix=commit-,suffix=${{ matrix.suffix }}
# Tag with git tag on release
type=ref,event=tag,suffix=${{ matrix.suffix }}
type=raw,value=release,enable=${{ github.event_name == 'release' }},suffix=${{ matrix.suffix }}
- name: Create manifest list and push
working-directory: ${{ runner.temp }}/digests
- name: Determine build cache output
id: cache-target
run: |
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.GHCR_REPO }}@sha256:%s ' *)
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
# Essentially just ignore the cache output (PR can't write to registry cache)
echo "cache-to=type=local,dest=/tmp/discard,ignore-error=true" >> $GITHUB_OUTPUT
else
echo "cache-to=type=registry,mode=max,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{ env.image }}" >> $GITHUB_OUTPUT
fi
- name: Build and push image
uses: docker/build-push-action@v6.10.0
with:
context: ${{ env.context }}
file: ${{ env.file }}
platforms: ${{ matrix.platforms }}
# Skip pushing when PR from a fork
push: ${{ !github.event.pull_request.head.repo.fork }}
cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/immich-build-cache:${{env.image}}
cache-to: ${{ steps.cache-target.outputs.cache-to }}
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
build-args: |
DEVICE=${{ matrix.device }}
BUILD_ID=${{ github.run_id }}
BUILD_IMAGE=${{ github.event_name == 'release' && github.ref_name || steps.metadata.outputs.tags }}
BUILD_SOURCE_REF=${{ github.ref_name }}
BUILD_SOURCE_COMMIT=${{ github.sha }}
success-check-server:
name: Docker Build & Push Server Success
needs: [merge_server, retag_server]
needs: [build_and_push_server, retag_server]
runs-on: ubuntu-latest
if: always()
steps:
@@ -443,7 +298,7 @@ jobs:
success-check-ml:
name: Docker Build & Push ML Success
needs: [merge_ml, retag_ml]
needs: [build_and_push_ml, retag_ml]
runs-on: ubuntu-latest
if: always()
steps:


@@ -15,7 +15,7 @@ jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
should_run: ${{ steps.found_paths.outputs.docs == 'true' || steps.should_force.outputs.should_force == 'true' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -25,11 +25,9 @@ jobs:
filters: |
docs:
- 'docs/**'
workflow:
- '.github/workflows/docs-build.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'release' || github.ref_name == 'main' }}" >> "$GITHUB_OUTPUT"
build:
name: Docs Build


@@ -68,17 +68,10 @@ jobs:
needs: build_mobile
steps:
- name: Generate a token
id: generate-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.PUSH_O_MATIC_APP_ID }}
private-key: ${{ secrets.PUSH_O_MATIC_APP_KEY }}
- name: Checkout
uses: actions/checkout@v4
with:
token: ${{ steps.generate-token.outputs.token }}
token: ${{ secrets.ORG_RELEASE_TOKEN }}
- name: Download APK
uses: actions/download-artifact@v4


@@ -1,33 +0,0 @@
name: Preview label
on:
pull_request:
types: [labeled]
jobs:
comment-status:
runs-on: ubuntu-latest
if: ${{ github.event.action == 'labeled' && github.event.label.name == 'preview' }}
permissions:
pull-requests: write
steps:
- uses: mshick/add-pr-comment@v2
with:
message-id: "preview-status"
message: "Deploying preview environment to https://pr-${{ github.event.pull_request.number }}.preview.internal.immich.cloud/"
remove-label:
runs-on: ubuntu-latest
if: ${{ github.event.action == 'closed' && contains(github.event.pull_request.labels.*.name, 'preview') }}
permissions:
pull-requests: write
steps:
- uses: actions/github-script@v7
with:
script: |
github.rest.issues.removeLabel({
issue_number: context.payload.pull_request.number,
owner: context.repo.owner,
repo: context.repo.repo,
name: 'preview'
})


@@ -23,11 +23,9 @@ jobs:
filters: |
mobile:
- 'mobile/**'
workflow:
- '.github/workflows/static_analysis.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'release' }}" >> "$GITHUB_OUTPUT"
mobile-dart-analyze:
name: Run Dart Code Analysis


@@ -43,12 +43,10 @@ jobs:
- 'mobile/**'
machine-learning:
- 'machine-learning/**'
workflow:
- '.github/workflows/test.yml'
- name: Check if we should force jobs to run
id: should_force
run: echo "should_force=${{ steps.found_paths.outputs.workflow == 'true' || github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
run: echo "should_force=${{ github.event_name == 'workflow_dispatch' }}" >> "$GITHUB_OUTPUT"
server-unit-tests:
name: Test & Lint Server
@@ -246,30 +244,25 @@ jobs:
run: npm run check
if: ${{ !cancelled() }}
server-medium-tests:
medium-tests-server:
name: Medium Tests (Server)
needs: pre-job
if: ${{ needs.pre-job.outputs.should_run_server == 'true' }}
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./server
runs-on: mich
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version-file: './server/.nvmrc'
submodules: 'recursive'
- name: Run npm install
run: npm ci
- name: Production build
if: ${{ !cancelled() }}
run: docker compose -f e2e/docker-compose.yml build
- name: Run medium tests
run: npm run test:medium
if: ${{ !cancelled() }}
run: make test-medium
e2e-tests-server-cli:
name: End-to-End Tests (Server & CLI)
@@ -457,7 +450,7 @@ jobs:
runs-on: ubuntu-latest
services:
postgres:
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
env:
POSTGRES_PASSWORD: postgres
POSTGRES_USER: postgres
@@ -509,7 +502,6 @@ jobs:
run: |
echo "ERROR: Generated migration files not up to date!"
echo "Changed files: ${{ steps.verify-changed-files.outputs.changed_files }}"
cat ./src/migrations/*-TestMigration.ts
exit 1
- name: Run SQL generation


@@ -1,50 +0,0 @@
name: Weblate checks
on:
pull_request:
branches: [main]
jobs:
pre-job:
runs-on: ubuntu-latest
outputs:
should_run: ${{ steps.found_paths.outputs.i18n == 'true' && github.head_ref != 'chore/translations'}}
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: found_paths
uses: dorny/paths-filter@v3
with:
filters: |
i18n:
- 'i18n/!(en)**\.json'
enforce-lock:
name: Check Weblate Lock
runs-on: ubuntu-latest
if: ${{ needs.pre-job.outputs.should_run == 'true' }}
steps:
- name: Check weblate lock
run: |
if [[ "false" = $(curl https://hosted.weblate.org/api/components/immich/immich/lock/ | jq .locked) ]]; then
exit 1
fi
- name: Find Pull Request
uses: juliangruber/find-pull-request-action@v1
id: find-pr
with:
branch: chore/translations
- name: Fail if existing weblate PR
if: ${{ steps.find-pr.outputs.number }}
run: exit 1
success-check-lock:
name: Weblate Lock Check Success
needs: [ enforce-lock ]
runs-on: ubuntu-latest
if: always()
steps:
- name: Any jobs failed?
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1
- name: All jobs passed or skipped
if: ${{ !(contains(needs.*.result, 'failure')) }}
run: echo "All jobs passed or skipped" && echo "${{ toJSON(needs.*.result) }}"


@@ -20,7 +20,10 @@
"editor.tabSize": 2
},
"svelte.enable-ts-plugin": true,
"eslint.validate": ["javascript", "svelte"],
"eslint.validate": [
"javascript",
"svelte"
],
"typescript.preferences.importModuleSpecifier": "non-relative",
"[dart]": {
"editor.formatOnSave": true,


@@ -27,9 +27,6 @@ open-api:
open-api-dart:
cd ./open-api && bash ./bin/generate-open-api.sh dart
open-api-dart-2:
cd ./open-api && bash ./bin/generate-open-api.sh dart-2
open-api-typescript:
cd ./open-api && bash ./bin/generate-open-api.sh typescript


@@ -29,7 +29,6 @@
<a href="readme_i18n/README_nl_NL.md">Nederlands</a>
<a href="readme_i18n/README_tr_TR.md">Türkçe</a>
<a href="readme_i18n/README_zh_CN.md">中文</a>
<a href="readme_i18n/README_uk_UA.md">Українська</a>
<a href="readme_i18n/README_ru_RU.md">Русский</a>
<a href="readme_i18n/README_pt_BR.md">Português Brasileiro</a>
<a href="readme_i18n/README_sv_SE.md">Svenska</a>


@@ -1 +1 @@
22.14.0
22.12.0


@@ -1,4 +1,4 @@
FROM node:22.14.0-alpine3.20@sha256:40be979442621049f40b1d51a26b55e281246b5de4e5f51a18da7beb6e17e3f9 AS core
FROM node:22.12.0-alpine3.20@sha256:96cc8323e25c8cc6ddcb8b965e135cfd57846e8003ec0d7bcec16c5fd5f6d39f AS core
WORKDIR /usr/src/open-api/typescript-sdk
COPY open-api/typescript-sdk/package*.json open-api/typescript-sdk/tsconfig*.json ./

cli/package-lock.json generated (1184 lines changed)
File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
{
"name": "@immich/cli",
"version": "2.2.53",
"version": "2.2.37",
"description": "Command Line Interface (CLI) for Immich",
"type": "module",
"exports": "./dist/index.js",
@@ -19,27 +19,26 @@
"@types/byte-size": "^8.1.0",
"@types/cli-progress": "^3.11.0",
"@types/lodash-es": "^4.17.12",
"@types/micromatch": "^4.0.9",
"@types/mock-fs": "^4.13.1",
"@types/node": "^22.13.5",
"@types/node": "^22.10.2",
"@typescript-eslint/eslint-plugin": "^8.15.0",
"@typescript-eslint/parser": "^8.15.0",
"@vitest/coverage-v8": "^3.0.0",
"@vitest/coverage-v8": "^2.0.5",
"byte-size": "^9.0.0",
"cli-progress": "^3.12.0",
"commander": "^12.0.0",
"eslint": "^9.14.0",
"eslint-config-prettier": "^10.0.0",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^56.0.1",
"globals": "^16.0.0",
"globals": "^15.9.0",
"mock-fs": "^5.2.0",
"prettier": "^3.2.5",
"prettier-plugin-organize-imports": "^4.0.0",
"typescript": "^5.3.3",
"vite": "^6.0.0",
"vite": "^5.0.12",
"vite-tsconfig-paths": "^5.0.0",
"vitest": "^3.0.0",
"vitest": "^2.0.5",
"vitest-fetch-mock": "^0.4.0",
"yaml": "^2.3.1"
},
@@ -63,13 +62,11 @@
"node": ">=20.0.0"
},
"dependencies": {
"chokidar": "^4.0.3",
"fast-glob": "^3.3.2",
"fastq": "^1.17.1",
"lodash-es": "^4.17.21",
"micromatch": "^4.0.8"
"lodash-es": "^4.17.21"
},
"volta": {
"node": "22.14.0"
"node": "22.12.0"
}
}


@@ -1,13 +1,12 @@
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';
import { setTimeout as sleep } from 'node:timers/promises';
import { describe, expect, it, MockedFunction, vi } from 'vitest';
import { describe, expect, it, vi } from 'vitest';
import { Action, checkBulkUpload, defaults, getSupportedMediaTypes, Reason } from '@immich/sdk';
import { Action, checkBulkUpload, defaults, Reason } from '@immich/sdk';
import createFetchMock from 'vitest-fetch-mock';
import { checkForDuplicates, getAlbumName, startWatch, uploadFiles, UploadOptionsDto } from 'src/commands/asset';
import { checkForDuplicates, getAlbumName, uploadFiles, UploadOptionsDto } from './asset';
vi.mock('@immich/sdk');
@@ -200,112 +199,3 @@ describe('checkForDuplicates', () => {
});
});
});
describe('startWatch', () => {
let testFolder: string;
let checkBulkUploadMocked: MockedFunction<typeof checkBulkUpload>;
beforeEach(async () => {
vi.restoreAllMocks();
vi.mocked(getSupportedMediaTypes).mockResolvedValue({
image: ['.jpg'],
sidecar: ['.xmp'],
video: ['.mp4'],
});
testFolder = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'test-startWatch-'));
checkBulkUploadMocked = vi.mocked(checkBulkUpload);
checkBulkUploadMocked.mockResolvedValue({
results: [],
});
});
it('should start watching a directory and upload new files', async () => {
const testFilePath = path.join(testFolder, 'test.jpg');
await startWatch([testFolder], { concurrency: 1 }, { batchSize: 1, debounceTimeMs: 10 });
await sleep(100); // to debounce the watcher from considering the test file as an existing file
await fs.promises.writeFile(testFilePath, 'testjpg');
await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: [
expect.objectContaining({
id: testFilePath,
}),
],
},
});
});
it('should filter out unsupported files', async () => {
const testFilePath = path.join(testFolder, 'test.jpg');
const unsupportedFilePath = path.join(testFolder, 'test.txt');
await startWatch([testFolder], { concurrency: 1 }, { batchSize: 1, debounceTimeMs: 10 });
await sleep(100); // to debounce the watcher from considering the test file as an existing file
await fs.promises.writeFile(testFilePath, 'testjpg');
await fs.promises.writeFile(unsupportedFilePath, 'testtxt');
await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: testFilePath,
}),
]),
},
});
expect(checkBulkUpload).not.toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: unsupportedFilePath,
}),
]),
},
});
});
it('should filter out ignored patterns', async () => {
const testFilePath = path.join(testFolder, 'test.jpg');
const ignoredPattern = 'ignored';
const ignoredFolder = path.join(testFolder, ignoredPattern);
await fs.promises.mkdir(ignoredFolder, { recursive: true });
const ignoredFilePath = path.join(ignoredFolder, 'ignored.jpg');
await startWatch([testFolder], { concurrency: 1, ignore: ignoredPattern }, { batchSize: 1, debounceTimeMs: 10 });
await sleep(100); // to debounce the watcher from considering the test file as an existing file
await fs.promises.writeFile(testFilePath, 'testjpg');
await fs.promises.writeFile(ignoredFilePath, 'ignoredjpg');
await vi.waitUntil(() => checkBulkUploadMocked.mock.calls.length > 0, 3000);
expect(checkBulkUpload).toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: testFilePath,
}),
]),
},
});
expect(checkBulkUpload).not.toHaveBeenCalledWith({
assetBulkUploadCheckDto: {
assets: expect.arrayContaining([
expect.objectContaining({
id: ignoredFilePath,
}),
]),
},
});
});
afterEach(async () => {
await fs.promises.rm(testFolder, { recursive: true, force: true });
});
});


@@ -12,18 +12,13 @@ import {
getSupportedMediaTypes,
} from '@immich/sdk';
import byteSize from 'byte-size';
import { Matcher, watch as watchFs } from 'chokidar';
import { MultiBar, Presets, SingleBar } from 'cli-progress';
import { chunk } from 'lodash-es';
import micromatch from 'micromatch';
import { Stats, createReadStream } from 'node:fs';
import { stat, unlink } from 'node:fs/promises';
import path, { basename } from 'node:path';
import { Queue } from 'src/queue';
import { BaseOptions, Batcher, authenticate, crawl, sha1 } from 'src/utils';
const UPLOAD_WATCH_BATCH_SIZE = 100;
const UPLOAD_WATCH_DEBOUNCE_TIME_MS = 10_000;
import { BaseOptions, authenticate, crawl, sha1 } from 'src/utils';
const s = (count: number) => (count === 1 ? '' : 's');
@@ -41,8 +36,6 @@ export interface UploadOptionsDto {
albumName?: string;
includeHidden?: boolean;
concurrency: number;
progress?: boolean;
watch?: boolean;
}
class UploadFile extends File {
@@ -62,94 +55,19 @@ class UploadFile extends File {
}
}
const uploadBatch = async (files: string[], options: UploadOptionsDto) => {
const { newFiles, duplicates } = await checkForDuplicates(files, options);
const newAssets = await uploadFiles(newFiles, options);
await updateAlbums([...newAssets, ...duplicates], options);
await deleteFiles(newFiles, options);
};
export const startWatch = async (
paths: string[],
options: UploadOptionsDto,
{
batchSize = UPLOAD_WATCH_BATCH_SIZE,
debounceTimeMs = UPLOAD_WATCH_DEBOUNCE_TIME_MS,
}: { batchSize?: number; debounceTimeMs?: number } = {},
) => {
const watcherIgnored: Matcher[] = [];
const { image, video } = await getSupportedMediaTypes();
const extensions = new Set([...image, ...video]);
if (options.ignore) {
watcherIgnored.push((path) => micromatch.contains(path, `**/${options.ignore}`));
}
const pathsBatcher = new Batcher<string>({
batchSize,
debounceTimeMs,
onBatch: async (paths: string[]) => {
const uniquePaths = [...new Set(paths)];
await uploadBatch(uniquePaths, options);
},
});
const onFile = async (path: string, stats?: Stats) => {
if (stats?.isDirectory()) {
return;
}
const ext = '.' + path.split('.').pop()?.toLowerCase();
if (!ext || !extensions.has(ext)) {
return;
}
if (!options.progress) {
// only log when the progress bar is disabled, since console output can break the progress bar rendering
console.log(`Change detected: ${path}`);
}
pathsBatcher.add(path);
};
const fsWatcher = watchFs(paths, {
ignoreInitial: true,
ignored: watcherIgnored,
alwaysStat: true,
awaitWriteFinish: true,
depth: options.recursive ? undefined : 1,
persistent: true,
})
.on('add', onFile)
.on('change', onFile)
.on('error', (error) => console.error(`Watcher error: ${error}`));
process.on('SIGINT', async () => {
console.log('Exiting...');
await fsWatcher.close();
process.exit();
});
};
export const upload = async (paths: string[], baseOptions: BaseOptions, options: UploadOptionsDto) => {
await authenticate(baseOptions);
const scanFiles = await scan(paths, options);
if (scanFiles.length === 0) {
if (options.watch) {
console.log('No files found initially.');
} else {
console.log('No files found, exiting');
return;
}
console.log('No files found, exiting');
return;
}
if (options.watch) {
console.log('Watching for changes...');
await startWatch(paths, options);
// the watcher does not handle the initial scan,
// since scan() is a more efficient quick start with batched results
}
await uploadBatch(scanFiles, options);
const { newFiles, duplicates } = await checkForDuplicates(scanFiles, options);
const newAssets = await uploadFiles(newFiles, options);
await updateAlbums([...newAssets, ...duplicates], options);
await deleteFiles(newFiles, options);
};
const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
@@ -167,25 +85,19 @@ const scan = async (pathsToCrawl: string[], options: UploadOptionsDto) => {
return files;
};
export const checkForDuplicates = async (files: string[], { concurrency, skipHash, progress }: UploadOptionsDto) => {
export const checkForDuplicates = async (files: string[], { concurrency, skipHash }: UploadOptionsDto) => {
if (skipHash) {
console.log('Skipping hash check, assuming all files are new');
return { newFiles: files, duplicates: [] };
}
let multiBar: MultiBar | undefined;
const multiBar = new MultiBar(
{ format: '{message} | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
Presets.shades_classic,
);
if (progress) {
multiBar = new MultiBar(
{ format: '{message} | {bar} | {percentage}% | ETA: {eta}s | {value}/{total} assets' },
Presets.shades_classic,
);
} else {
console.log(`Received ${files.length} files, hashing...`);
}
const hashProgressBar = multiBar?.create(files.length, 0, { message: 'Hashing files ' });
const checkProgressBar = multiBar?.create(files.length, 0, { message: 'Checking for duplicates' });
const hashProgressBar = multiBar.create(files.length, 0, { message: 'Hashing files ' });
const checkProgressBar = multiBar.create(files.length, 0, { message: 'Checking for duplicates' });
const newFiles: string[] = [];
const duplicates: Asset[] = [];
@@ -205,7 +117,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
}
}
checkProgressBar?.increment(assets.length);
checkProgressBar.increment(assets.length);
},
{ concurrency, retry: 3 },
);
@@ -225,7 +137,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
void checkBulkUploadQueue.push(batch);
}
hashProgressBar?.increment();
hashProgressBar.increment();
return results;
},
{ concurrency, retry: 3 },
@@ -243,7 +155,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
await checkBulkUploadQueue.drained();
multiBar?.stop();
multiBar.stop();
console.log(`Found ${newFiles.length} new files and ${duplicates.length} duplicate${s(duplicates.length)}`);
@@ -259,10 +171,7 @@ export const checkForDuplicates = async (files: string[], { concurrency, skipHas
return { newFiles, duplicates };
};
export const uploadFiles = async (
files: string[],
{ dryRun, concurrency, progress }: UploadOptionsDto,
): Promise<Asset[]> => {
export const uploadFiles = async (files: string[], { dryRun, concurrency }: UploadOptionsDto): Promise<Asset[]> => {
if (files.length === 0) {
console.log('All assets were already uploaded, nothing to do.');
return [];
@@ -282,20 +191,12 @@ export const uploadFiles = async (
return files.map((filepath) => ({ id: '', filepath }));
}
let uploadProgress: SingleBar | undefined;
if (progress) {
uploadProgress = new SingleBar(
{
format: 'Uploading assets | {bar} | {percentage}% | ETA: {eta_formatted} | {value_formatted}/{total_formatted}',
},
Presets.shades_classic,
);
} else {
console.log(`Uploading ${files.length} asset${s(files.length)} (${byteSize(totalSize)})`);
}
uploadProgress?.start(totalSize, 0);
uploadProgress?.update({ value_formatted: 0, total_formatted: byteSize(totalSize) });
const uploadProgress = new SingleBar(
{ format: 'Uploading assets | {bar} | {percentage}% | ETA: {eta_formatted} | {value_formatted}/{total_formatted}' },
Presets.shades_classic,
);
uploadProgress.start(totalSize, 0);
uploadProgress.update({ value_formatted: 0, total_formatted: byteSize(totalSize) });
let duplicateCount = 0;
let duplicateSize = 0;
@@ -321,7 +222,7 @@ export const uploadFiles = async (
successSize += stats.size ?? 0;
}
uploadProgress?.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });
uploadProgress.update(successSize, { value_formatted: byteSize(successSize + duplicateSize) });
return response;
},
@@ -334,7 +235,7 @@ export const uploadFiles = async (
await queue.drained();
uploadProgress?.stop();
uploadProgress.stop();
console.log(`Successfully uploaded ${successCount} new asset${s(successCount)} (${byteSize(successSize)})`);
if (duplicateCount > 0) {


@@ -69,13 +69,6 @@ program
.default(4),
)
.addOption(new Option('--delete', 'Delete local assets after upload').env('IMMICH_DELETE_ASSETS'))
.addOption(new Option('--no-progress', 'Hide progress bars').env('IMMICH_PROGRESS_BAR').default(true))
.addOption(
new Option('--watch', 'Watch for changes and upload automatically')
.env('IMMICH_WATCH_CHANGES')
.default(false)
.implies({ progress: false }),
)
.argument('[paths...]', 'One or more paths to assets to be uploaded')
.action((paths, options) => upload(paths, program.opts(), options));


@@ -1,13 +1,11 @@
import mockfs from 'mock-fs';
import { readFileSync } from 'node:fs';
import { Batcher, CrawlOptions, crawl } from 'src/utils';
import { Mock } from 'vitest';
import { CrawlOptions, crawl } from 'src/utils';
interface Test {
test: string;
options: Omit<CrawlOptions, 'extensions'>;
files: Record<string, boolean>;
skipOnWin32?: boolean;
}
const cwd = process.cwd();
@@ -50,18 +48,6 @@ const tests: Test[] = [
'/photos/image.jpg': true,
},
},
{
test: 'should crawl folders with quotes',
options: {
pathsToCrawl: ["/photo's/", '/photo"s/', '/photo`s/'],
},
files: {
"/photo's/image1.jpg": true,
'/photo"s/image2.jpg': true,
'/photo`s/image3.jpg': true,
},
skipOnWin32: true, // single quote interferes with mockfs root on Windows
},
{
test: 'should crawl a single file',
options: {
@@ -284,12 +270,8 @@ describe('crawl', () => {
});
describe('crawl', () => {
for (const { test: name, options, files, skipOnWin32 } of tests) {
if (process.platform === 'win32' && skipOnWin32) {
test.skip(name);
continue;
}
it(name, async () => {
for (const { test, options, files } of tests) {
it(test, async () => {
// The file contents is the same as the path.
mockfs(Object.fromEntries(Object.keys(files).map((file) => [file, file])));
@@ -304,38 +286,3 @@ describe('crawl', () => {
}
});
});
describe('Batcher', () => {
let batcher: Batcher;
let onBatch: Mock;
beforeEach(() => {
onBatch = vi.fn();
batcher = new Batcher({ batchSize: 2, onBatch });
});
it('should trigger onBatch() when a batch limit is reached', async () => {
batcher.add('a');
batcher.add('b');
batcher.add('c');
expect(onBatch).toHaveBeenCalledOnce();
expect(onBatch).toHaveBeenCalledWith(['a', 'b']);
});
it('should trigger onBatch() when flush() is called', async () => {
batcher.add('a');
batcher.flush();
expect(onBatch).toHaveBeenCalledOnce();
expect(onBatch).toHaveBeenCalledWith(['a']);
});
it('should trigger onBatch() when debounce time reached', async () => {
vi.useFakeTimers();
batcher = new Batcher({ batchSize: 2, debounceTimeMs: 100, onBatch });
batcher.add('a');
expect(onBatch).not.toHaveBeenCalled();
vi.advanceTimersByTime(200);
expect(onBatch).toHaveBeenCalledOnce();
expect(onBatch).toHaveBeenCalledWith(['a']);
vi.useRealTimers();
});
});


@@ -146,7 +146,7 @@ export const crawl = async (options: CrawlOptions): Promise<string[]> => {
}
const searchPatterns = patterns.map((pattern) => {
let escapedPattern = pattern.replaceAll("'", "[']").replaceAll('"', '["]').replaceAll('`', '[`]');
let escapedPattern = pattern;
if (recursive) {
escapedPattern = escapedPattern + '/**';
}
@@ -172,64 +172,3 @@ export const sha1 = (filepath: string) => {
rs.on('end', () => resolve(hash.digest('hex')));
});
};
/**
* Batches items and calls onBatch to process them
* when the batch size is reached or the debounce time has passed.
*/
export class Batcher<T = unknown> {
private items: T[] = [];
private readonly batchSize: number;
private readonly debounceTimeMs?: number;
private readonly onBatch: (items: T[]) => void;
private debounceTimer?: NodeJS.Timeout;
constructor({
batchSize,
debounceTimeMs,
onBatch,
}: {
batchSize: number;
debounceTimeMs?: number;
onBatch: (items: T[]) => Promise<void>;
}) {
this.batchSize = batchSize;
this.debounceTimeMs = debounceTimeMs;
this.onBatch = onBatch;
}
private setDebounceTimer() {
if (this.debounceTimer) {
clearTimeout(this.debounceTimer);
}
if (this.debounceTimeMs) {
this.debounceTimer = setTimeout(() => this.flush(), this.debounceTimeMs);
}
}
private clearDebounceTimer() {
if (this.debounceTimer) {
clearTimeout(this.debounceTimer);
this.debounceTimer = undefined;
}
}
add(item: T) {
this.items.push(item);
this.setDebounceTimer();
if (this.items.length >= this.batchSize) {
this.flush();
}
}
flush() {
this.clearDebounceTimer();
if (this.items.length === 0) {
return;
}
this.onBatch(this.items);
this.items = [];
}
}
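
For context, a minimal usage sketch of the Batcher removed above, based only on the constructor, add(), and flush() shown in this hunk; the batch size, debounce time, and example paths are illustrative, not taken from the CLI:

import { Batcher } from 'src/utils';

// Collect paths and hand them to onBatch in groups of 100,
// or after 10 seconds of inactivity, whichever comes first.
const batcher = new Batcher<string>({
  batchSize: 100,
  debounceTimeMs: 10_000,
  onBatch: async (paths) => {
    console.log(`Processing batch of ${paths.length} path(s)`);
  },
});

batcher.add('/photos/a.jpg'); // queued; debounce timer (re)started
batcher.add('/photos/b.jpg'); // still below batchSize, stays queued
batcher.flush();              // force the pending items through onBatch now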


@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.52.0"
constraints = "4.52.0"
version = "4.48.0"
constraints = "4.48.0"
hashes = [
"h1:2BEJyXJtYC4B4nda/WCYUmuJYDaYk88F8t1pwPzr0iQ=",
"h1:4IASk5SESeWKQ7JU0+M7KApuF5mZyklvwMXPBabim3c=",
"h1:5ImZxxALSnWfH/4EXw/wFirSmk5Tr0ACmcysy51AafE=",
"h1:6TJ3dxLSin4ZKBJLsZDn95H2ZYnGm8S7GGHvvXuuMQU=",
"h1:IzTUjg9kQ4N3qizP9CjYLeHwjsuGgtxwXvfUQWyOLcA=",
"h1:NTaOQfYINA0YTG/V1/9+SYtgX1it63+cBugj4WK4FWc=",
"h1:PXH48LuJn329sCfMXprdMDk51EZaWFyajVvS03qhQLs=",
"h1:Pi5M+GeoMSN2eJ6QnIeXjBf19O+rby/74CfB2ocpv20=",
"h1:ShXZ2ZjBvm3thfoPPzPT8+OhyismnydQVkUAfI8X12w=",
"h1:WQ9hu0Wge2msBbODfottCSKgu8oKUrw4Opz+fDPVVHk=",
"h1:Z5yXML2DE0uH9UU+M0ut9JMQAORcwVZz1CxBHzeBmao=",
"h1:jqI2qKknpleS3JDSplyGYHMu0u9K/tor1ZOjFwDgEMk=",
"h1:kgfutDh14Q5nw4eg6qGFamFxIiY8Ae0FPKRBLDOzpcI=",
"h1:zCAO7GZmfYhWb+i6TfqlqhMeDyPZWGio2IzEzAh3YTs=",
"zh:19be1a91c982b902c42aba47766860dfa5dc151eed1e95fd39ca642229381ef0",
"zh:1de451c4d1ecf7efbe67b6dace3426ba810711afdd644b0f1b870364c8ae91f8",
"zh:352b4a2120173298622e669258744554339d959ac3a95607b117a48ee4a83238",
"zh:3c6f1346d9154afbd2d558fabb4b0150fc8d559aa961254144fe1bc17fe6032f",
"zh:4c4c92d53fb535b1e0eff26f222bbd627b97d3b4c891ec9c321268676d06152f",
"zh:53276f68006c9ceb7cdb10a6ccf91a5c1eadd1407a28edb5741e84e88d7e29e8",
"zh:7925a97773948171a63d4f65bb81ee92fd6d07a447e36012977313293a5435c9",
"zh:7dfb0a4496cfe032437386d0a2cd9229a1956e9c30bd920923c141b0f0440060",
"h1:0IKUOR32xEI1suS5QCOjfxjQ2mRd058btXk8hVnaOJ4=",
"h1:3YG6vu/bFPcYOeLdSUZhiAWiWKaFlOAR34z2o8cbE9k=",
"h1:FvGy06/i9AMtVkSIUnCrXNv5xF6jqBqMH8oPVLyeeAg=",
"h1:GXH7nIF0ocMqebbA41+fSGIYfM+VAM/PvTe7fJr8UrQ=",
"h1:H0ll0ph4404vFE868W3qJ3zhOyy4jbXrOMtdkViEZsU=",
"h1:SX42e3k73IcFcrQlZ2e/Veqt2tvCMy6fwlo5yNUktCE=",
"h1:Uu/gjBc99GefdPdSrlBwU75DWU0ZcwGcrd3ZFyTeL0s=",
"h1:VZw0uN41PWRmNlhg7Ze0Eh7cdoklX1oZbfNAXNYnU1I=",
"h1:cMdV7ql6PsFa4qtb0EoZSctvTaTqV7yplBSDwcLRCLc=",
"h1:ePGvSurmlqOCkD761vkhRmz7bsK36/EnIvx2Xy8TdXo=",
"h1:fOYufF+1bzw2N3aHLpkLB6E8VbZ4ysXDODYQOlwhwd4=",
"h1:qe8RbnWq0T4xhqjn9QcbO6YW5YDx47P+eJ0NUMIfwCc=",
"h1:tRD2av6PafHDP/b9jDQsG5/aX+lHeKxpbIEHYYLBVUc=",
"h1:zyl6Gvx/CFpwYW8pFFDesfO8Lxv+a6CopyAsIMhp54s=",
"zh:04c0a49c2b23140b2f21cfd0d52f9798d70d3bdae3831613e156aabe519bbc6c",
"zh:185f21b4834ba63e8df1f84aa34639d8a7e126429a4007bb5f9ad82f2602a997",
"zh:234724f52cb4c0c3f7313d3b2697caef26d921d134f26ae14801e7afac522f7b",
"zh:38a56fcd1b3e40706af995611c977816543b53f1e55fe2720944aae2b6828fcb",
"zh:419938f5430fc78eff933470aefbf94a460a478f867cf7761a3dea177b4eb153",
"zh:4b46d92bfde1deab7de7ba1a6bbf4ba7c711e4fd925341ddf09d4cc28dae03d8",
"zh:537acd4a31c752f1bae305ba7190f60b71ad1a459f22d464f3f914336c9e919f",
"zh:5ff36b005aad07697dd0b30d4f0c35dbcdc30dc52b41722552060792fa87ce04",
"zh:635c5ee419daea098060f794d9d7d999275301181e49562c4e4c08f043076937",
"zh:859277c330d61f91abe9e799389467ca11b77131bf34bedbef52f8da68b2bb49",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8d4aa79f0a414bb4163d771063c70cd991c8fac6c766e685bac2ee12903c5bd6",
"zh:a67540c13565616a7e7e51ee9366e88b0dc60046e1d75c72680e150bd02725bb",
"zh:a936383a4767f5393f38f622e92bf2d0c03fe04b69c284951f27345766c7b31b",
"zh:d4887d73c466ff036eecf50ad6404ba38fd82ea4855296b1846d244b0f13c380",
"zh:e9093c8bd5b6cd99c81666e315197791781b8f93afa14fc2e0f732d1bb2a44b7",
"zh:efd3b3f1ec59a37f635aa1d4efcf178734c2fcf8ddb0d56ea690bec342da8672",
"zh:927dfdb8d9aef37ead03fceaa29e87ba076a3dd24e19b6cefdbb0efe9987ff8c",
"zh:bbf2226f07f6b1e721877328e69ded4b64f9c196634d2e2429e3cfabbe41e532",
"zh:daeed873d6f38604232b46ee4a5830c85d195b967f8dbcafe2fcffa98daf9c5f",
"zh:f8f2fc4646c1ba44085612fa7f4dbb7cbcead43b4e661f2b98ddfb4f68afc758",
]
}


@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.52.0"
version = "4.48.0"
}
}
}


@@ -2,37 +2,37 @@
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/cloudflare/cloudflare" {
version = "4.52.0"
constraints = "4.52.0"
version = "4.48.0"
constraints = "4.48.0"
hashes = [
"h1:2BEJyXJtYC4B4nda/WCYUmuJYDaYk88F8t1pwPzr0iQ=",
"h1:4IASk5SESeWKQ7JU0+M7KApuF5mZyklvwMXPBabim3c=",
"h1:5ImZxxALSnWfH/4EXw/wFirSmk5Tr0ACmcysy51AafE=",
"h1:6TJ3dxLSin4ZKBJLsZDn95H2ZYnGm8S7GGHvvXuuMQU=",
"h1:IzTUjg9kQ4N3qizP9CjYLeHwjsuGgtxwXvfUQWyOLcA=",
"h1:NTaOQfYINA0YTG/V1/9+SYtgX1it63+cBugj4WK4FWc=",
"h1:PXH48LuJn329sCfMXprdMDk51EZaWFyajVvS03qhQLs=",
"h1:Pi5M+GeoMSN2eJ6QnIeXjBf19O+rby/74CfB2ocpv20=",
"h1:ShXZ2ZjBvm3thfoPPzPT8+OhyismnydQVkUAfI8X12w=",
"h1:WQ9hu0Wge2msBbODfottCSKgu8oKUrw4Opz+fDPVVHk=",
"h1:Z5yXML2DE0uH9UU+M0ut9JMQAORcwVZz1CxBHzeBmao=",
"h1:jqI2qKknpleS3JDSplyGYHMu0u9K/tor1ZOjFwDgEMk=",
"h1:kgfutDh14Q5nw4eg6qGFamFxIiY8Ae0FPKRBLDOzpcI=",
"h1:zCAO7GZmfYhWb+i6TfqlqhMeDyPZWGio2IzEzAh3YTs=",
"zh:19be1a91c982b902c42aba47766860dfa5dc151eed1e95fd39ca642229381ef0",
"zh:1de451c4d1ecf7efbe67b6dace3426ba810711afdd644b0f1b870364c8ae91f8",
"zh:352b4a2120173298622e669258744554339d959ac3a95607b117a48ee4a83238",
"zh:3c6f1346d9154afbd2d558fabb4b0150fc8d559aa961254144fe1bc17fe6032f",
"zh:4c4c92d53fb535b1e0eff26f222bbd627b97d3b4c891ec9c321268676d06152f",
"zh:53276f68006c9ceb7cdb10a6ccf91a5c1eadd1407a28edb5741e84e88d7e29e8",
"zh:7925a97773948171a63d4f65bb81ee92fd6d07a447e36012977313293a5435c9",
"zh:7dfb0a4496cfe032437386d0a2cd9229a1956e9c30bd920923c141b0f0440060",
"h1:0IKUOR32xEI1suS5QCOjfxjQ2mRd058btXk8hVnaOJ4=",
"h1:3YG6vu/bFPcYOeLdSUZhiAWiWKaFlOAR34z2o8cbE9k=",
"h1:FvGy06/i9AMtVkSIUnCrXNv5xF6jqBqMH8oPVLyeeAg=",
"h1:GXH7nIF0ocMqebbA41+fSGIYfM+VAM/PvTe7fJr8UrQ=",
"h1:H0ll0ph4404vFE868W3qJ3zhOyy4jbXrOMtdkViEZsU=",
"h1:SX42e3k73IcFcrQlZ2e/Veqt2tvCMy6fwlo5yNUktCE=",
"h1:Uu/gjBc99GefdPdSrlBwU75DWU0ZcwGcrd3ZFyTeL0s=",
"h1:VZw0uN41PWRmNlhg7Ze0Eh7cdoklX1oZbfNAXNYnU1I=",
"h1:cMdV7ql6PsFa4qtb0EoZSctvTaTqV7yplBSDwcLRCLc=",
"h1:ePGvSurmlqOCkD761vkhRmz7bsK36/EnIvx2Xy8TdXo=",
"h1:fOYufF+1bzw2N3aHLpkLB6E8VbZ4ysXDODYQOlwhwd4=",
"h1:qe8RbnWq0T4xhqjn9QcbO6YW5YDx47P+eJ0NUMIfwCc=",
"h1:tRD2av6PafHDP/b9jDQsG5/aX+lHeKxpbIEHYYLBVUc=",
"h1:zyl6Gvx/CFpwYW8pFFDesfO8Lxv+a6CopyAsIMhp54s=",
"zh:04c0a49c2b23140b2f21cfd0d52f9798d70d3bdae3831613e156aabe519bbc6c",
"zh:185f21b4834ba63e8df1f84aa34639d8a7e126429a4007bb5f9ad82f2602a997",
"zh:234724f52cb4c0c3f7313d3b2697caef26d921d134f26ae14801e7afac522f7b",
"zh:38a56fcd1b3e40706af995611c977816543b53f1e55fe2720944aae2b6828fcb",
"zh:419938f5430fc78eff933470aefbf94a460a478f867cf7761a3dea177b4eb153",
"zh:4b46d92bfde1deab7de7ba1a6bbf4ba7c711e4fd925341ddf09d4cc28dae03d8",
"zh:537acd4a31c752f1bae305ba7190f60b71ad1a459f22d464f3f914336c9e919f",
"zh:5ff36b005aad07697dd0b30d4f0c35dbcdc30dc52b41722552060792fa87ce04",
"zh:635c5ee419daea098060f794d9d7d999275301181e49562c4e4c08f043076937",
"zh:859277c330d61f91abe9e799389467ca11b77131bf34bedbef52f8da68b2bb49",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8d4aa79f0a414bb4163d771063c70cd991c8fac6c766e685bac2ee12903c5bd6",
"zh:a67540c13565616a7e7e51ee9366e88b0dc60046e1d75c72680e150bd02725bb",
"zh:a936383a4767f5393f38f622e92bf2d0c03fe04b69c284951f27345766c7b31b",
"zh:d4887d73c466ff036eecf50ad6404ba38fd82ea4855296b1846d244b0f13c380",
"zh:e9093c8bd5b6cd99c81666e315197791781b8f93afa14fc2e0f732d1bb2a44b7",
"zh:efd3b3f1ec59a37f635aa1d4efcf178734c2fcf8ddb0d56ea690bec342da8672",
"zh:927dfdb8d9aef37ead03fceaa29e87ba076a3dd24e19b6cefdbb0efe9987ff8c",
"zh:bbf2226f07f6b1e721877328e69ded4b64f9c196634d2e2429e3cfabbe41e532",
"zh:daeed873d6f38604232b46ee4a5830c85d195b967f8dbcafe2fcffa98daf9c5f",
"zh:f8f2fc4646c1ba44085612fa7f4dbb7cbcead43b4e661f2b98ddfb4f68afc758",
]
}

View File

@@ -5,7 +5,7 @@ terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "4.52.0"
version = "4.48.0"
}
}
}

View File

@@ -1,13 +1,4 @@
#
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
# For development see:
# See:
# - https://immich.app/docs/developer/setup
# - https://immich.app/docs/developer/troubleshooting
@@ -80,7 +71,6 @@ services:
- ../web:/usr/src/app
- ../i18n:/usr/src/i18n
- ../open-api/:/usr/src/open-api/
# - ../../ui:/usr/ui
- /usr/src/app/node_modules
ulimits:
nofile:
@@ -116,13 +106,13 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
image: redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
healthcheck:
test: redis-cli ping || exit 1
database:
container_name: immich_postgres
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
env_file:
- .env
environment:

View File

@@ -1,12 +1,3 @@
#
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich-prod
services:
@@ -56,14 +47,14 @@ services:
redis:
container_name: immich_redis
image: redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
image: redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
env_file:
- .env
environment:
@@ -100,7 +91,7 @@ services:
container_name: immich_prometheus
ports:
- 9090:9090
image: prom/prometheus@sha256:6927e0919a144aa7616fd0137d4816816d42f6b816de3af269ab065250859a62
image: prom/prometheus@sha256:565ee86501224ebbb98fc10b332fa54440b100469924003359edf49cbce374bd
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus-data:/prometheus
@@ -112,7 +103,7 @@ services:
command: ['./run.sh', '-disable-reporting']
ports:
- 3000:3000
image: grafana/grafana:11.5.2-ubuntu@sha256:8b5858c447e06fd7a89006b562ba7bba7c4d5813600c7982374c41852adefaeb
image: grafana/grafana:11.4.0-ubuntu@sha256:afccec22ba0e4815cca1d2bf3836e414322390dc78d77f1851976ffa8d61051c
volumes:
- grafana-data:/var/lib/grafana

View File

@@ -1,11 +1,10 @@
#
# WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
# WARNING: Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
#
name: immich
@@ -49,14 +48,14 @@ services:
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
image: docker.io/redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}

View File

@@ -48,7 +48,6 @@ services:
vaapi-wsl: # use this for VAAPI if you're running Immich in WSL2
devices:
- /dev/dri:/dev/dri
- /dev/dxg:/dev/dxg
volumes:
- /usr/lib/wsl:/usr/lib/wsl
environment:

View File

@@ -1 +1 @@
22.14.0
22.12.0

View File

@@ -49,22 +49,13 @@ The behaviors differ based on your device manufacturer and operating system, but
On iOS (iPhone and iPad), the operating system determines if a particular app can invoke background tasks based on multiple factors, most of which the Immich app has no control over. To increase the likelihood that the background backup task is run, follow the steps below:
- Enable Background App Refresh for Immich in the iOS settings at `Settings > General > Background App Refresh`.
- Disable **Low Power Mode** when not needed, as this can prevent apps from running in the background.
- Disable Background App Refresh for apps that don't need background tasks to run. This will reduce the competition for background task invocation for Immich.
- Use the Immich app more often.
### Why are features in the mobile app not working with a self-signed certificate, Basic Auth, custom headers, or mutual TLS?
### Why are features not working with a self-signed cert or mTLS?
These network features are experimental. They often do not work with video playback, asset upload or download, and other features.
Many of these limitations are tracked in [#15230](https://github.com/immich-app/immich/issues/15230).
Instead of these experimental features, we recommend using the URL switching feature, a VPN, or a [free trusted SSL certificate](https://letsencrypt.org/) for your domain.
We are not actively developing these features and will not be able to provide support, but welcome contributions to improve them.
Please discuss any large PRs with our dev team to ensure your time is not wasted.
### Why isn't the mobile app updated yet?
The app stores can take a few days to approve new builds of the app. If you're impatient, Android APKs can be downloaded from the GitHub releases.
Due to limitations in the upstream app/video library, using a self-signed TLS certificate or mutual TLS may break video playback or asset upload (both foreground and/or background).
We recommend using a real SSL certificate from a free provider, for example [Let's Encrypt](https://letsencrypt.org/).
---
@@ -97,7 +88,7 @@ Make sure to [set your reverse proxy](/docs/administration/reverse-proxy/) to al
Also, check the disk space of your reverse proxy.
In some cases, proxies cache requests to disk before passing them on, and if disk space runs out, the request fails.
If you are using Cloudflare Tunnel, please know that they set a maximum filesize of 100 MB that cannot be changed.
If you are using Cloudflare Tunnel, please know that they set a maxiumum filesize of 100 MB that cannot be changed.
At times, files larger than this may work, potentially up to 1 GB. However, the official limit is 100 MB.
If you are having issues, we recommend switching to a different network deployment.
@@ -164,35 +155,6 @@ For example, say you have existing transcodes with the policy "Videos higher tha
No. Our design principle is that the original assets should always be untouched.
### How can I mount a CIFS/Samba volume within Docker?
If you aren't able to or prefer not to mount Samba on the host (such as in a Windows environment), you can mount the volume within Docker.
Below is an example in the `docker-compose.yml`.
Change your username, password, local IP, and share name, and see below where the line `- originals:/usr/src/app/originals`
correlates to the section where the volume `originals` was created. You can call this whatever you like, and map it to the docker container as you like.
For example you could change `originals:` to `Photos:`, and change `- originals:/usr/src/app/originals` to `Photos:/usr/src/app/photos`.
```diff
...
services:
immich-server:
...
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
+ - originals:/usr/src/app/originals
...
volumes:
model-cache:
+ originals:
+ driver_opts:
+ type: cifs
+ o: 'iocharset=utf8,username=USERNAMEHERE,password=PASSWORDHERE,rw' # change to `ro` if read only desired
+ device: '//localipaddress/sharename'
```
---
## Albums
@@ -315,7 +277,7 @@ The initial backup is the most intensive due to the number of jobs running. The
- For facial recognition on new images to work properly, you must re-run the Face Detection job for all images after this.
- At the container level, you can [set resource constraints](/docs/FAQ#can-i-limit-cpu-and-ram-usage) to lower usage further.
- It's recommended to only apply these constraints _after_ taking some of the measures here for best performance.
- If these changes are not enough, see [above](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning.
- If these changes are not enough, see [below](/docs/FAQ#how-can-i-disable-machine-learning) for instructions on how to disable machine learning.
### Can I limit CPU and RAM usage?
@@ -458,7 +420,7 @@ A result of `on` means that checksums are enabled.
<summary>Check if checksums are enabled</summary>
```bash
docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME> --command="show data_checksums"
docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --command="show data_checksums"
data_checksums
----------------
on
@@ -473,7 +435,7 @@ If checksums are enabled, you can check the status of the database with the foll
<summary>Check for database corruption</summary>
```bash
docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME> --command="SELECT datname, checksum_failures, checksum_last_failure FROM pg_stat_database WHERE datname IS NOT NULL"
docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME> --command="SELECT datname, checksum_failures, checksum_last_failure FROM pg_stat_database WHERE datname IS NOT NULL"
datname | checksum_failures | checksum_last_failure
-----------+-------------------+-----------------------
postgres | 0 |
@@ -485,24 +447,6 @@ docker exec -it immich_postgres psql --dbname=postgres --username=<DB_USERNAME>
</details>
You can also scan the Postgres database file structure for errors:
<details>
<summary>Scan for file structure errors</summary>
```bash
docker exec -it immich_postgres pg_amcheck --username=postgres --heapallindexed --parent-check --rootdescend --progress --all --install-missing
```
A normal result will end something like this and return with an exit code of `0`:
```bash
7470/8832 relations (84%), 730829/734735 pages (99%)
8425/8832 relations (95%), 734367/734735 pages (99%)
8832/8832 relations (100%), 734735/734735 pages (100%)
```
</details>
If corruption is detected, you should immediately make a backup before performing any other work in the database.
To do so, you may need to set the `zero_damaged_pages=on` flag for the database server to allow `pg_dumpall` to succeed.
After taking a backup, the recommended next step is to restore the database from a healthy backup before corruption was detected.
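Taking that pre-repair backup could look like the following sketch, assuming the default container name and superuser from the standard docker-compose setup (adjust names and paths to your deployment):
```bash
# Allow pg_dumpall to skip damaged pages (superuser-only setting); revert it afterwards
docker exec -it immich_postgres psql --username=postgres --command="ALTER SYSTEM SET zero_damaged_pages = on;"
docker exec -it immich_postgres psql --username=postgres --command="SELECT pg_reload_conf();"

# Dump everything to a compressed file outside the container
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres | gzip > /path/to/backup/pre-repair-dump.sql.gz

# Turn the flag back off once the dump has finished
docker exec -it immich_postgres psql --username=postgres --command="ALTER SYSTEM RESET zero_damaged_pages;"
docker exec -it immich_postgres psql --username=postgres --command="SELECT pg_reload_conf();"
```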

View File

@@ -55,7 +55,7 @@ sleep 10 # Wait for Postgres server to start up
# Check the database user if you deviated from the default
gunzip < "/path/to/backup/dump.sql.gz" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| docker exec -i immich_postgres psql --dbname=postgres --username=<DB_USERNAME> # Restore Backup
| docker exec -i immich_postgres psql --username=postgres # Restore Backup
docker compose up -d # Start remainder of Immich apps
```
@@ -70,16 +70,18 @@ docker compose up -d # Start remainder of Immich apps
docker compose down -v # CAUTION! Deletes all Immich data to start from scratch
## Uncomment the next line and replace DB_DATA_LOCATION with your Postgres path to permanently reset the Postgres database
# Remove-Item -Recurse -Force DB_DATA_LOCATION # CAUTION! Deletes all Immich data to start from scratch
## You should mount the backup (as a volume, example: `- 'C:\path\to\backup\dump.sql:/dump.sql'`) into the immich_postgres container using the docker-compose.yml
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
docker exec -it immich_postgres bash # Enter the Docker shell and run the following command
# Check the database user if you deviated from the default. If your backup ends in `.gz`, replace `cat` with `gunzip`
cat < "/dump.sql" | sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" | psql --dbname=postgres --username=<DB_USERNAME>
exit # Exit the Docker shell
docker compose up -d # Start remainder of Immich apps
## You should mount the backup (as a volume, example: - 'C:\path\to\backup\dump.sql':/dump.sql) into the immich_postgres container using the docker-compose.yml
docker compose pull # Update to latest version of Immich (if desired)
docker compose create # Create Docker containers for Immich apps without running them
docker start immich_postgres # Start Postgres server
sleep 10 # Wait for Postgres server to start up
docker exec -it immich_postgres bash # Enter the Docker shell and run the following command
# Check the database user if you deviated from the default
cat "/dump.sql" \
| sed "s/SELECT pg_catalog.set_config('search_path', '', false);/SELECT pg_catalog.set_config('search_path', 'public, pg_catalog', true);/g" \
| psql --username=postgres # Restore Backup
exit # Exit the Docker shell
docker compose up -d # Start remainder of Immich apps
```
</TabItem>
@@ -93,14 +95,12 @@ Some deployment methods make it difficult to start the database without also sta
## Filesystem
Immich stores two types of content in the filesystem: (a) original, unmodified assets (photos and videos), and (b) generated content. We recommend backing up the entire contents of `UPLOAD_LOCATION`, but only the original content is critical, which is stored in the following folders:
Immich stores two types of content in the filesystem: (1) original, unmodified assets (photos and videos), and (2) generated content. Only the original content needs to be backed-up, which is stored in the following folders:
1. `UPLOAD_LOCATION/library`
2. `UPLOAD_LOCATION/upload`
3. `UPLOAD_LOCATION/profile`
If you choose to back up only those folders, you will need to rerun the transcoding and thumbnail generation jobs for all assets after you restore from a backup.
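For example, a minimal copy of just the original content could look like this (a sketch assuming `rsync` is available and that `UPLOAD_LOCATION` points at `/path/to/UPLOAD_LOCATION`; adjust both paths to your setup):
```bash
# Copy only the folders that hold original, unmodified content to a backup target
rsync -a /path/to/UPLOAD_LOCATION/library /path/to/UPLOAD_LOCATION/upload /path/to/UPLOAD_LOCATION/profile /path/to/backup/immich-files/
```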
:::caution
If you moved some of these folders onto a different storage device, such as `profile/`, make sure to adjust the backup path to match your setup
:::

View File

@@ -42,10 +42,6 @@ Typically Immich expects superuser permission in the database, which you can gra
This method is recommended for **advanced users only** and often requires manual intervention when updating Immich.
:::
:::danger
Currently, automated backups require superuser permission due to the usage of `pg_dumpall`.
:::
Immich can run without superuser permissions by following the below instructions at the `psql` prompt to prepare the database.
```sql title="Set up Postgres for Immich"
@@ -70,4 +66,4 @@ When installing a new version of pgvecto.rs, you will need to manually update th
If you get the error `driverError: error: permission denied for view pg_vector_index_stat`, you can fix this by connecting to the Immich database and running `GRANT SELECT ON TABLE pg_vector_index_stat TO <immichdbusername>;`.
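Applied through the standard Docker setup, that grant might look like this (a sketch assuming the default container name, an `immich` database, and a superuser named `postgres`; substitute your own names):
```bash
# Run the grant as the Postgres superuser against the Immich database
docker exec -it immich_postgres psql --dbname=immich --username=postgres \
  --command='GRANT SELECT ON TABLE pg_vector_index_stat TO <immichdbusername>;'
```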
[vectors-install]: https://docs.vectorchord.ai/getting-started/installation.html
[vectors-install]: https://docs.pgvecto.rs/getting-started/installation.html

View File

@@ -98,14 +98,6 @@ The default Immich log level is `Log` (commonly known as `Info`). The Immich adm
Through this setting, you can manage all the settings related to machine learning in Immich, from the configuration of remote machine learning to the models and their parameters.
You can choose to disable a certain type of machine learning, for example smart search or facial recognition.
### URL
The built in (`http://immich-machine-learning:3003`) machine learning server will be configured by default, but you can change this or add additional servers.
Hosting the `immich-machine-learning` container on a machine with a more powerful GPU can be helpful for processing a large number of photos (such as during batch import) or for faster search.
If more than one URL is provided, each server will be attempted one-at-a-time until one responds successfully, in order from first to last. Servers that don't respond will be temporarily ignored until they come back online.
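Before adding a remote server here, it can help to confirm it is reachable from the machine running the Immich server. Assuming the default port `3003` and the machine-learning service's usual `/ping` endpoint (`my-gpu-host` is a placeholder), a quick check could be:
```bash
# Should print "pong" if the machine-learning server is up and reachable
curl http://my-gpu-host:3003/ping
```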
### Smart Search
The [smart search](/docs/features/searching) settings allow you to change the [CLIP model](https://openai.com/research/clip). Larger models will typically provide [more accurate search results](https://github.com/immich-app/immich/discussions/11862) but consume more processing power and RAM. When [changing the CLIP model](/docs/FAQ#can-i-use-a-custom-clip-model) it is mandatory to re-run the Smart Search job on all images to fully apply the change.

View File

@@ -50,18 +50,19 @@ The Immich CLI is an [npm](https://www.npmjs.com/) package that lets users contr
The Immich backend is divided into several services, which are run as individual docker containers.
1. `immich-server` - Handle and respond to REST API requests, execute background jobs (thumbnail generation, metadata extraction, transcoding, etc.)
1. `immich-server` - Handle and respond to REST API requests
1. `immich-microservices` - Execute background jobs (thumbnail generation, metadata extraction, transcoding, etc.)
1. `immich-machine-learning` - Execute machine learning models
1. `postgres` - Persistent data storage
1. `redis`- Queue management for background jobs
1. `redis`- Queue management for `immich-microservices`
### Immich Server
The Immich Server is a [TypeScript](https://www.typescriptlang.org/) project written for [Node.js](https://nodejs.org/). It uses the [Nest.js](https://nestjs.com) framework, [Express](https://expressjs.com/) server, and the query builder [Kysely](https://kysely.dev/). The server codebase also loosely follows the [Hexagonal Architecture](<https://en.wikipedia.org/wiki/Hexagonal_architecture_(software)>). Specifically, we aim to separate technology specific implementations (`src/repositories`) from core business logic (`src/services`).
The Immich Server is a [TypeScript](https://www.typescriptlang.org/) project written for [Node.js](https://nodejs.org/). It uses the [Nest.js](https://nestjs.com) framework, with [TypeORM](https://typeorm.io/) for database management. The server codebase also loosely follows the [Hexagonal Architecture](<https://en.wikipedia.org/wiki/Hexagonal_architecture_(software)>). Specifically, we aim to separate technology specific implementations (`infra/`) from core business logic (`domain/`).
### API Endpoints
#### REST Endpoints
An incoming HTTP request is mapped to a controller (`src/controllers`). Controllers are collections of HTTP endpoints. Each controller usually implements the following CRUD operations for its respective resource type:
The server is a list of HTTP endpoints and associated handlers (controllers). Each controller usually implements the following CRUD operations:
- `POST` `/<type>` - **Create**
- `GET` `/<type>` - **Read** (all)
@@ -69,13 +70,13 @@ An incoming HTTP request is mapped to a controller (`src/controllers`). Controll
- `PUT` `/<type>/:id` - **Updated** (by id)
- `DELETE` `/<type>/:id` - **Delete** (by id)
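As a purely illustrative example of this convention (route names and payloads vary between resources and versions), creating and then listing albums could look like:
```bash
# Create an album, then list all albums; x-api-key authenticates the request
curl -X POST http://localhost:2283/api/albums \
  -H 'x-api-key: <API_KEY>' -H 'Content-Type: application/json' \
  -d '{"albumName": "Trip to the coast"}'

curl http://localhost:2283/api/albums -H 'x-api-key: <API_KEY>'
```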
### Domain Transfer Objects (DTOs)
#### DTOs
The server uses [Domain Transfer Objects](https://en.wikipedia.org/wiki/Data_transfer_object) as public interfaces for the inputs (query, params, and body) and outputs (response) for each endpoint. DTOs translate to [OpenAPI](./open-api.md) schemas and control the generated code used by each client.
### Background Jobs
### Microservices
Immich uses a [worker](https://github.com/immich-app/immich/blob/main/server/src/utils/misc.ts#L266) to run background jobs. These jobs include:
The Immich Microservices image uses the same `Dockerfile` as the Immich Server, but with a different entrypoint. The Immich Microservices service mainly handles executing jobs, which include the following:
- Thumbnail Generation
- Metadata Extraction

View File

@@ -63,20 +63,9 @@ If you only want to do web development connected to an existing, remote backend,
IMMICH_SERVER_URL=https://demo.immich.app/ npm run dev
```
#### `@immich/ui`
To see local changes to `@immich/ui` in Immich, do the following:
1. Install `@immich/ui` as a sibling to `immich/`, for example `/home/user/immich` and `/home/user/ui`
1. Build the `@immich/ui` project via `npm run build`
1. Uncomment the corresponding volume in web service of the `docker/docker-compose.dev.yaml` file (`../../ui:/usr/ui`)
1. Uncomment the corresponding alias in the `web/vite.config.js` file (`'@immich/ui': path.resolve(\_\_dirname, '../../ui')`)
1. Start up the stack via `make dev`
1. After making changes in `@immich/ui`, rebuild it (`npm run build`)
### Mobile app
The mobile app `(/mobile)` will require Flutter toolchain 3.13.x and FVM to be installed on your system.
The mobile app `(/mobile)` will require Flutter toolchain 3.13.x to be installed on your system.
Please refer to the [Flutter's official documentation](https://flutter.dev/docs/get-started/install) for more information on setting up the toolchain on your machine.

View File

@@ -69,8 +69,6 @@ Navigating to Administration > Settings > Machine Learning Settings > Facial Rec
:::tip
It's better to only tweak the parameters here than to set them to something very different unless you're ready to test a variety of options. If you do need to set a parameter to a strict setting, relaxing other settings can be a good option to compensate, and vice versa.
You can learn how the tune the result in this [Guide](/docs/guides/better-facial-clusters)
:::
### Facial recognition model

View File

@@ -58,7 +58,7 @@ If your photos are on a network drive, automatic file watching likely won't work
#### Troubleshooting
If you encounter an `ENOSPC` error, you need to increase your file watcher limit. In sysctl, this key is called `fs.inotify.max_user_watches` and has a default value of 8192. Increase this number to a suitable value greater than the number of files you will be watching. Note that Immich has to watch all files in your import paths including any ignored files.
If you encounter an `ENOSPC` error, you need to increase your file watcher limit. In sysctl, this key is called `fs.inotify.max_user_watched` and has a default value of 8192. Increase this number to a suitable value greater than the number of files you will be watching. Note that Immich has to watch all files in your import paths including any ignored files.
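On a typical Linux host, the limit can be raised like this (a sketch; pick a value larger than the total number of files in your import paths):
```bash
# Raise the limit for the current boot
sudo sysctl fs.inotify.max_user_watches=1048576

# Persist the new limit across reboots
echo 'fs.inotify.max_user_watches=1048576' | sudo tee /etc/sysctl.d/99-immich-watches.conf
sudo sysctl -p /etc/sysctl.d/99-immich-watches.conf
```
The error itself looks like this: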
```
ERROR [LibraryService] Library watcher for library c69faf55-f96d-4aa0-b83b-2d80cbc27d98 encountered error: Error: ENOSPC: System limit for number of file watchers reached, watch '/media/photo.jpg'
@@ -68,7 +68,7 @@ In rare cases, the library watcher can hang, preventing Immich from starting up.
### Nightly job
There is an automatic scan job that is scheduled to run once a day. This job also cleans up any libraries stuck in deletion. It is possible to trigger the cleanup by clicking "Scan all libraries" in the library management page.
There is an automatic scan job that is scheduled to run once a day. This job also cleans up any libraries stuck in deletion.
## Usage
@@ -111,10 +111,11 @@ These actions must be performed by the Immich administrator.
- Click on Administration -> Libraries
- Click on Create External Library
- Select which user owns the library; this cannot be changed later
- Enter `/mnt/media/christmas-trip` then click Add
- Click on Save
- Click the drop-down menu on the newly created library
- Click on Rename Library and rename it to "Christmas Trip"
- Click Edit Import Paths
- Click on Add Path
- Enter `/mnt/media/christmas-trip` then click Add
NOTE: We have to use the `/mnt/media/christmas-trip` path and not the `/mnt/nas/christmas-trip` path since all paths have to be what the Docker containers see.

View File

@@ -11,7 +11,7 @@ You do not need to redo any machine learning jobs after enabling hardware accele
- ARM NN (Mali)
- CUDA (NVIDIA GPUs with [compute capability](https://developer.nvidia.com/cuda-gpus) 5.2 or higher)
- OpenVINO (Intel GPUs such as Iris Xe and Arc)
- OpenVINO (Intel discrete GPUs such as Iris Xe and Arc)
## Limitations
@@ -43,9 +43,8 @@ You do not need to redo any machine learning jobs after enabling hardware accele
#### OpenVINO
- Integrated GPUs are more likely to experience issues than discrete GPUs, especially for older processors or servers with low RAM.
- The server must have a discrete GPU, i.e. Iris Xe or Arc. Expect issues when attempting to use integrated graphics.
- Ensure the server's kernel version is new enough to use the device for hardware acceleration.
- Expect higher RAM usage when using OpenVINO compared to CPU processing.
## Setup

View File

@@ -36,15 +36,11 @@ You can enable automatic backup on supported devices. For more information see [
If you have a large number of photos on the device, and you would prefer not to backup all the photos, then it might be prudent to only backup selected photos from device to the Immich server.
First, you need to enable the Storage Indicator in your app's settings. Navigate to **<ins>Settings -> Photo Grid</ins>** and enable **"Show Storage indicator on asset tiles"**; this makes it easy to distinguish local-only assets and synced assets.
:::note
This will enable a small cloud icon on the bottom right corner of the asset tile, indicating that the asset is synced to the server:
1. <Icon path={mdiCloudOffOutline} size={1} /> - Local-only asset; not synced to the server
2. <Icon path={mdiCloudCheckOutline} size={1} /> - Asset is synced to the server
:::
2. <Icon path={mdiCloudCheckOutline} size={1} /> - Asset is synced to the server :::
Now make sure that the local album is selected in the backup screen (steps 1-2 above). You can find these albums listed in **<ins>Library -> On this device</ins>**. To selectively upload photos from these albums, simply select the local-only photos and tap on "Upload" button in the dynamic bottom menu.

View File

@@ -68,7 +68,7 @@ After bringing down the containers with `docker compose down` and back up with `
:::note
To see exactly what metrics are made available, you can additionally add `8081:8081` to the server container's ports and `8082:8082` to the microservices container's ports.
Visiting the `/metrics` endpoint for these services will show the same raw data that Prometheus collects.
To configure these ports see [`IMMICH_API_METRICS_PORT` & `IMMICH_MICROSERVICES_METRICS_PORT`](/docs/install/environment-variables/#general).
To configure these ports see [`IMMICH_API_METRICS_PORT` & `IMMICH_MICROSERVICES_METRICS_PORT`](../install/environment-variables/#general).
:::
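For instance, after mapping `8081:8081` for the server container, the raw data can be inspected directly (a sketch assuming the default metrics port):
```bash
# Dump the raw metrics exposed by the API worker
curl http://localhost:8081/metrics
```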
### Usage

View File

@@ -31,7 +31,6 @@ The filters smart search allows you to search by include:
- Not in any album
- Archived
- Favorited
- Rating
<Tabs>
<TabItem value="Computer" label="Computer" default>

View File

@@ -8,23 +8,22 @@ For the full list, refer to the [Immich source code](https://github.com/immich-a
## Image formats
| Format | Extension(s) | Supported? | Notes |
| :---------- | :---------------------------- | :----------------: | :-------------- |
| `AVIF` | `.avif` | :white_check_mark: | |
| `BMP` | `.bmp` | :white_check_mark: | |
| `GIF` | `.gif` | :white_check_mark: | |
| `HEIC` | `.heic` | :white_check_mark: | |
| `HEIF` | `.heif` | :white_check_mark: | |
| `JPEG 2000` | `.jp2` | :white_check_mark: | |
| `JPEG` | `.jpg` `.jpe` `.insp` | :white_check_mark: | |
| `JPEG XL` | `.jxl` | :white_check_mark: | |
| `PNG` | `.png` | :white_check_mark: | |
| `PSD` | `.psd` | :white_check_mark: | Adobe Photoshop |
| `RAW` | `.raw` | :white_check_mark: | |
| `RW2` | `.rw2` | :white_check_mark: | |
| `SVG` | `.svg` | :white_check_mark: | |
| `TIFF` | `.tif` `.tiff` | :white_check_mark: | |
| `WEBP` | `.webp` | :white_check_mark: | |
| Format | Extension(s) | Supported? | Notes |
| :-------- | :---------------------------- | :----------------: | :-------------- |
| `AVIF` | `.avif` | :white_check_mark: | |
| `BMP` | `.bmp` | :white_check_mark: | |
| `GIF` | `.gif` | :white_check_mark: | |
| `HEIC` | `.heic` | :white_check_mark: | |
| `HEIF` | `.heif` | :white_check_mark: | |
| `JPEG` | `.jpg` `.jpe` `.insp` | :white_check_mark: | |
| `JPEG XL` | `.jxl` | :white_check_mark: | |
| `PNG` | `.png` | :white_check_mark: | |
| `PSD` | `.psd` | :white_check_mark: | Adobe Photoshop |
| `RAW` | `.raw` | :white_check_mark: | |
| `RW2` | `.rw2` | :white_check_mark: | |
| `SVG` | `.svg` | :white_check_mark: | |
| `TIFF` | `.tif` `.tiff` | :white_check_mark: | |
| `WEBP` | `.webp` | :white_check_mark: | |
## Video formats
@@ -35,7 +34,7 @@ For the full list, refer to the [Immich source code](https://github.com/immich-a
| `FLV` | `.flv` | :white_check_mark: | |
| `M4V` | `.m4v` | :white_check_mark: | |
| `MATROSKA` | `.mkv` | :white_check_mark: | |
| `MP2T` | `.mts` `.m2ts` `.m2t` | :white_check_mark: | |
| `MP2T` | `.mts` `.m2ts` | :white_check_mark: | |
| `MP4` | `.mp4` `.insv` | :white_check_mark: | |
| `MPEG` | `.mpg` `.mpe` `.mpeg` | :white_check_mark: | |
| `QUICKTIME` | `.mov` | :white_check_mark: | |

View File

@@ -1,72 +0,0 @@
# Better Facial Recognition Clusters
## Purpose
This guide explains how to optimize facial recognition in systems with large image libraries. By following these steps, you'll achieve better clustering of faces, reducing the need for manual merging.
---
## Important Notes
- **Best Suited For:** Large image libraries after importing a significant number of images.
- **Warning:** This method deletes all previously assigned names.
- **Tip:** **Always take a [backup](/docs/administration/backup-and-restore#database) before proceeding!**
---
## Step-by-Step Instructions
### Objective
To enhance face clustering and ensure the model effectively identifies faces using qualitative initial data.
---
### Steps
#### 1. Adjust Machine Learning Settings
Navigate to:
**Admin → Administration → Settings → Machine Learning Settings**
Make the following changes:
- **Maximum recognition distance (Optional):**
Lower this value, e.g., to **0.4**, if the library contains people with similar facial features.
- **Minimum recognized faces:**
Set this to a **high value** (e.g., 20 for libraries with a large number of assets (~100K+), and 10 for libraries with a medium number of assets (~40K+)).
> A high value ensures clusters only include faces that appear at least 20/`value` times in the library, improving the initial clustering process.
---
#### 2. Run Reset Jobs
Go to:
**Admin → Administration → Settings → Jobs**
Perform the following:
1. **FACIAL RECOGNITION → Reset**
> These reset jobs rebuild the recognition model based on the new settings.
---
#### 3. Refine Recognition with Lower Thresholds
Once the reset jobs are complete, refine the recognition as follows:
- **Step 1:**
Return to **Minimum recognized faces** in Machine Learning Settings and lower the value to **10** (In medium libraries we will lower the value from 10 to 5).
> Run the job: **FACIAL RECOGNITION → MISSING Mode**
- **Step 2:**
Lower the value again to **3**.
> Run the job: **FACIAL RECOGNITION → MISSING Mode**
:::tip try different values
For certain libraries with a larger or smaller number of assets, other settings will be better or worse. It is recommended to try different values **before assigning names** and see which settings work best for your library.
:::
---

View File

@@ -6,7 +6,7 @@ This guide explains how to store generated and raw files with docker's volume mo
If you use automatic backup tools, remember to update their settings after following this guide so that the new paths, especially `profile/`, are still backed up.
:::
In our `.env` file, we will define the paths we want to use. Note that you don't have to define all of these: UPLOAD_LOCATION will be the base folder that files are stored in by default, with the other paths acting as overrides.
In our `.env` file, we will define variables that will help us in the future when we want to move to a more advanced server
```diff title=".env"
# You can find documentation for all the supported environment variables [here](/docs/install/environment-variables)
@@ -21,7 +21,7 @@ In our `.env` file, we will define the paths we want to use. Note that you don't
...
```
After defining the locations of these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container. These paths are where the mount attaches inside of the container, so don't change those.
After defining the locations of these files, we will edit the `docker-compose.yml` file accordingly and add the new variables to the `immich-server` container.
```diff title="docker-compose.yml"
services:
@@ -35,8 +35,7 @@ services:
- /etc/localtime:/etc/localtime:ro
```
After making this change, you have to move the files over to the new folders to make sure Immich can find everything it needs. If you haven't uploaded anything important yet, you can also reset Immich entirely by deleting the database folder.
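Moving the existing files might look like this (purely illustrative paths; match the folder names to the variables you actually overrode in `.env`, and stop the containers first):
```bash
docker compose down

# Move each generated-content folder from UPLOAD_LOCATION to its new location
mv /path/to/UPLOAD_LOCATION/thumbs /path/to/new/thumbs
mv /path/to/UPLOAD_LOCATION/encoded-video /path/to/new/encoded-video
mv /path/to/UPLOAD_LOCATION/profile /path/to/new/profile
```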
Then restart Immich to register the changes:
Restart Immich to register the changes.
```
docker compose up -d
@@ -50,3 +49,5 @@ The `thumbs/` folder contains both the small thumbnails displayed in the timelin
The storage metrics of the Immich server will track available storage at `UPLOAD_LOCATION`, so the administrator must set up some sort of monitoring to ensure the storage does not run out of space. The `profile/` folder is much smaller, usually less than 1 MB.
:::
Thanks to [Jrasm91](https://github.com/immich-app/immich/discussions/2110#discussioncomment-5477767) for writing the guide.

View File

@@ -5,9 +5,9 @@ Keep in mind that mucking around in the database might set the moon on fire. Avo
:::
:::tip
Run `docker exec -it immich_postgres psql --dbname=<DB_DATABASE_NAME> --username=<DB_USERNAME>` to connect to the database via the container directly.
Run `docker exec -it immich_postgres psql --dbname=immich --username=<DB_USERNAME>` to connect to the database via the container directly.
(Replace `<DB_DATABASE_NAME>` and `<DB_USERNAME>` with the values from your [`.env` file](/docs/install/environment-variables#database)).
(Replace `<DB_USERNAME>` with the value from your [`.env` file](/docs/install/environment-variables#database)).
:::
## Assets
@@ -27,14 +27,6 @@ SELECT * FROM "assets" WHERE "originalPath" = 'upload/library/admin/2023/2023-09
SELECT * FROM "assets" WHERE "originalPath" LIKE 'upload/library/admin/2023/%';
```
```sql title="Find by ID"
SELECT * FROM "assets" WHERE "id" = '9f94e60f-65b6-47b7-ae44-a4df7b57f0e9';
```
```sql title="Find by partial ID"
SELECT * FROM "assets" WHERE "id"::text LIKE '%ab431d3a%';
```
:::note
You can calculate the checksum for a particular file by using the command `sha1sum <filename>`.
:::
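For instance, to go from a file on disk to the matching row (a sketch that assumes the `checksum` column stores the raw SHA-1 bytes, so the hex digest is converted with `decode`):
```bash
# Print the SHA-1 of the file, then look the asset up by checksum
sha1sum photo.jpg
docker exec -it immich_postgres psql --dbname=<DB_DATABASE_NAME> --username=<DB_USERNAME> \
  --command="SELECT \"id\", \"originalPath\" FROM \"assets\" WHERE \"checksum\" = decode('<sha1-hex-digest>', 'hex');"
```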

View File

@@ -37,7 +37,7 @@ You can alternatively download these two files from your browser and move them t
</CodeBlock>
- Populate `UPLOAD_LOCATION` with your preferred location for storing backup assets. It should be a new directory on the server with enough free space.
- Consider changing `DB_PASSWORD` to a custom value. Postgres is not publicly exposed, so this password is only used for local authentication.
- Consider changing `DB_PASSWORD` to a custom value. Postgres is not publically exposed, so this password is only used for local authentication.
To avoid issues with Docker parsing this value, it is best to use only the characters `A-Za-z0-9`. `pwgen` is a handy utility for this.
- Set your timezone by uncommenting the `TZ=` line.
- Populate custom database information if necessary.

View File

@@ -11,7 +11,7 @@ Just restarting the containers does not replace the environment within the conta
In order to recreate the container using docker compose, run `docker compose up -d`.
In most cases docker will recognize that the `.env` file has changed and recreate the affected containers.
If this does not work, try running `docker compose up -d --force-recreate`.
If this should not work, try running `docker compose up -d --force-recreate`.
:::
@@ -20,8 +20,8 @@ If this does not work, try running `docker compose up -d --force-recreate`.
| Variable | Description | Default | Containers |
| :----------------- | :------------------------------ | :-------: | :----------------------- |
| `IMMICH_VERSION` | Image tags | `release` | server, machine learning |
| `UPLOAD_LOCATION` | Host path for uploads | | server |
| `DB_DATA_LOCATION` | Host path for Postgres database | | database |
| `UPLOAD_LOCATION` | Host Path for uploads | | server |
| `DB_DATA_LOCATION` | Host Path for Postgres database | | database |
:::tip
These environment variables are used by the `docker-compose.yml` file and do **NOT** affect the containers directly.
@@ -33,15 +33,15 @@ These environment variables are used by the `docker-compose.yml` file and do **N
| :---------------------------------- | :---------------------------------------------------------------------------------------- | :--------------------------: | :----------------------- | :----------------- |
| `TZ` | Timezone | <sup>\*1</sup> | server | microservices |
| `IMMICH_ENV` | Environment (production, development) | `production` | server, machine learning | api, microservices |
| `IMMICH_LOG_LEVEL` | Log level (verbose, debug, log, warn, error) | `log` | server, machine learning | api, microservices |
| `IMMICH_MEDIA_LOCATION` | Media location inside the container ⚠️**You probably shouldn't set this**<sup>\*2</sup>⚠️ | `./upload`<sup>\*3</sup> | server | api, microservices |
| `IMMICH_LOG_LEVEL` | Log Level (verbose, debug, log, warn, error) | `log` | server, machine learning | api, microservices |
| `IMMICH_MEDIA_LOCATION` | Media Location inside the container ⚠️**You probably shouldn't set this**<sup>\*2</sup>⚠️ | `./upload`<sup>\*3</sup> | server | api, microservices |
| `IMMICH_CONFIG_FILE` | Path to config file | | server | api, microservices |
| `NO_COLOR` | Set to `true` to disable color-coded log output | `false` | server, machine learning | |
| `CPU_CORES` | Number of cores available to the Immich server | auto-detected CPU core count | server | |
| `CPU_CORES` | Amount of cores available to the immich server | auto-detected cpu core count | server | |
| `IMMICH_API_METRICS_PORT` | Port for the OTEL metrics | `8081` | server | api |
| `IMMICH_MICROSERVICES_METRICS_PORT` | Port for the OTEL metrics | `8082` | server | microservices |
| `IMMICH_PROCESS_INVALID_IMAGES` | When `true`, generate thumbnails for invalid images | | server | microservices |
| `IMMICH_TRUSTED_PROXIES` | List of comma-separated IPs set as trusted proxies | | server | api |
| `IMMICH_TRUSTED_PROXIES` | List of comma separated IPs set as trusted proxies | | server | api |
| `IMMICH_IGNORE_MOUNT_CHECK_ERRORS` | See [System Integrity](/docs/administration/system-integrity) | | server | api, microservices |
\*1: `TZ` should be set to a `TZ identifier` from [this list][tz-list]. For example, `TZ="Etc/UTC"`.
@@ -50,7 +50,7 @@ These environment variables are used by the `docker-compose.yml` file and do **N
\*2: This path is where the Immich code looks for the files, which is internal to the docker container. Setting it to a path on your host will certainly break things, you should use the `UPLOAD_LOCATION` variable instead.
\*3: With the default `WORKDIR` of `/usr/src/app`, this path will resolve to `/usr/src/app/upload`.
It only needs to be set if the Immich deployment method is changing.
It only need to be set if the Immich deployment method is changing.
## Workers
@@ -75,12 +75,12 @@ Information on the current workers can be found [here](/docs/administration/jobs
| Variable | Description | Default | Containers |
| :---------------------------------- | :----------------------------------------------------------------------- | :----------: | :----------------------------- |
| `DB_URL` | Database URL | | server |
| `DB_HOSTNAME` | Database host | `database` | server |
| `DB_PORT` | Database port | `5432` | server |
| `DB_USERNAME` | Database user | `postgres` | server, database<sup>\*1</sup> |
| `DB_PASSWORD` | Database password | `postgres` | server, database<sup>\*1</sup> |
| `DB_DATABASE_NAME` | Database name | `immich` | server, database<sup>\*1</sup> |
| `DB_VECTOR_EXTENSION`<sup>\*2</sup> | Database vector extension (one of [`pgvector`, `pgvecto.rs`]) | `pgvecto.rs` | server |
| `DB_HOSTNAME` | Database Host | `database` | server |
| `DB_PORT` | Database Port | `5432` | server |
| `DB_USERNAME` | Database User | `postgres` | server, database<sup>\*1</sup> |
| `DB_PASSWORD` | Database Password | `postgres` | server, database<sup>\*1</sup> |
| `DB_DATABASE_NAME` | Database Name | `immich` | server, database<sup>\*1</sup> |
| `DB_VECTOR_EXTENSION`<sup>\*2</sup> | Database Vector Extension (one of [`pgvector`, `pgvecto.rs`]) | `pgvecto.rs` | server |
| `DB_SKIP_MIGRATIONS` | Whether to skip running migrations on startup (one of [`true`, `false`]) | `false` | server |
\*1: The values of `DB_USERNAME`, `DB_PASSWORD`, and `DB_DATABASE_NAME` are passed to the Postgres container as the variables `POSTGRES_USER`, `POSTGRES_PASSWORD`, and `POSTGRES_DB` in `docker-compose.yml`.
@@ -103,18 +103,18 @@ When `DB_URL` is defined, the `DB_HOSTNAME`, `DB_PORT`, `DB_USERNAME`, `DB_PASSW
| Variable | Description | Default | Containers |
| :--------------- | :------------- | :-----: | :--------- |
| `REDIS_URL` | Redis URL | | server |
| `REDIS_SOCKET` | Redis socket | | server |
| `REDIS_HOSTNAME` | Redis host | `redis` | server |
| `REDIS_PORT` | Redis port | `6379` | server |
| `REDIS_USERNAME` | Redis username | | server |
| `REDIS_PASSWORD` | Redis password | | server |
| `REDIS_DBINDEX` | Redis DB index | `0` | server |
| `REDIS_SOCKET` | Redis Socket | | server |
| `REDIS_HOSTNAME` | Redis Host | `redis` | server |
| `REDIS_PORT` | Redis Port | `6379` | server |
| `REDIS_USERNAME` | Redis Username | | server |
| `REDIS_PASSWORD` | Redis Password | | server |
| `REDIS_DBINDEX` | Redis DB Index | `0` | server |
:::info
All `REDIS_` variables must be provided to all Immich workers, including `api` and `microservices`.
`REDIS_URL` must start with `ioredis://` and then include a `base64` encoded JSON string for the configuration.
More information can be found in the upstream [ioredis] documentation.
More info can be found in the upstream [ioredis] documentation.
When `REDIS_URL` or `REDIS_SOCKET` are defined, the `REDIS_HOSTNAME`, `REDIS_PORT`, `REDIS_USERNAME`, `REDIS_PASSWORD`, and `REDIS_DBINDEX` variables are ignored.
:::
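Producing such a value by hand is straightforward (a sketch; the JSON shown is only an example Sentinel configuration):
```bash
# Base64-encode the JSON configuration, then prepend the ioredis:// scheme
echo -n '{"sentinels":[{"host":"redis-sentinel-node-0","port":26379},{"host":"redis-sentinel-node-1","port":26379}],"name":"redis-sentinel"}' | base64 -w0
# Use the output in .env as: REDIS_URL=ioredis://<base64-output>
```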
@@ -148,28 +148,24 @@ Redis (Sentinel) URL example JSON before encoding:
## Machine Learning
| Variable | Description | Default | Containers |
| :---------------------------------------------------------- | :-------------------------------------------------------------------------------------------------- | :-----------------------------: | :--------------- |
| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`<sup>\*3</sup> | HTTP Keep-alive time in seconds | `2` | machine learning |
| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning |
| `MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL` | Comma-separated list of (textual) CLIP model(s) to preload and cache | | machine learning |
| `MACHINE_LEARNING_PRELOAD__CLIP__VISUAL` | Comma-separated list of (visual) CLIP model(s) to preload and cache | | machine learning |
| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION` | Comma-separated list of (recognition) facial recognition model(s) to preload and cache | | machine learning |
| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION` | Comma-separated list of (detection) facial recognition model(s) to preload and cache | | machine learning |
| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
| `MACHINE_LEARNING_DEVICE_IDS`<sup>\*4</sup> | Device IDs to use in multi-GPU environments | `0` | machine learning |
| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |
| `MACHINE_LEARNING_PING_TIMEOUT` | How long (ms) to wait for a PING response when checking if an ML server is available | `2000` | server |
| `MACHINE_LEARNING_AVAILABILITY_BACKOFF_TIME` | How long to ignore ML servers that are offline before trying again | `30000` | server |
| Variable | Description | Default | Containers |
| :-------------------------------------------------------- | :-------------------------------------------------------------------------------------------------- | :-----------------------------: | :--------------- |
| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`<sup>\*3</sup> | HTTP Keep-alive time in seconds | `2` | machine learning |
| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning |
| `MACHINE_LEARNING_PRELOAD__CLIP` | Name of a CLIP model to be preloaded and kept in cache | | machine learning |
| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION` | Name of a facial recognition model to be preloaded and kept in cache | | machine learning |
| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
| `MACHINE_LEARNING_DEVICE_IDS`<sup>\*4</sup> | Device IDs to use in multi-GPU environments | `0` | machine learning |
| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |
\*1: It is recommended to begin with this parameter when changing the concurrency levels of the machine learning service and then tune the other ones.
@@ -181,11 +177,7 @@ Redis (Sentinel) URL example JSON before encoding:
:::info
While the `textual` model is the only one required for smart search, some users may experience slow first searches
due to backups triggering loading of the other models into memory, which blocks other requests until completed.
To avoid this, you can preload the other models (`visual`, `recognition`, and `detection`) if you have enough RAM to do so.
Additional machine learning parameters can be tuned from the admin UI.
Other machine learning parameters can be tuned from the admin UI.
:::
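For example, preloading all four model types in `.env` might look like this (the model names below are just common defaults; use whichever models you have configured):
```bash
MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL=ViT-B-32__openai
MACHINE_LEARNING_PRELOAD__CLIP__VISUAL=ViT-B-32__openai
MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION=buffalo_l
MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION=buffalo_l
```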
@@ -216,7 +208,7 @@ the `_FILE` variable should be set to the path of a file containing the variable
details on how to use Docker Secrets in the Postgres image.
\*2: See [this comment][docker-secrets-example] for an example of how
to use a Docker secret for the password in the Redis container.
to use use a Docker secret for the password in the Redis container.
[tz-list]: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
[docker-secrets-example]: https://github.com/docker-library/redis/issues/46#issuecomment-335326234

View File

@@ -14,9 +14,6 @@ Hardware and software requirements for Immich:
If you still want to try to use a non-Linux OS, you can set it up as follows:
- Windows: [Docker Desktop on Windows](https://docs.docker.com/desktop/install/windows-install/) or [WSL 2](https://docs.docker.com/desktop/wsl/).
- macOS: [Docker Desktop on Mac](https://docs.docker.com/desktop/install/mac-install/).
- Immich runs well in a virtualized environment when running in a full virtual machine.
The use of Docker in LXC containers is [not recommended](https://pve.proxmox.com/wiki/Linux_Container), but may be possible for advanced users.
If you have issues, we recommend that you switch to a supported VM deployment.
- **RAM**: Minimum 4GB, recommended 6GB.
- **CPU**: Minimum 2 cores, recommended 4 cores.
- **Storage**: Recommended Unix-compatible filesystem (EXT4, ZFS, APFS, etc.) with support for user/group ownership and permissions.

View File

@@ -27,7 +27,7 @@ The script will perform the following actions:
1. Download [docker-compose.yml](https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml), and the [.env](https://github.com/immich-app/immich/releases/latest/download/example.env) file from the main branch of the [repository](https://github.com/immich-app/immich).
2. Start the containers.
The web application and mobile app will be available at `http://<machine-ip-address>:2283`
The web application will be available at `http://<machine-ip-address>:2283`, and the server URL for the mobile app will be `http://<machine-ip-address>:2283/api`
The directory which is used to store the library files is `./immich-app` relative to the current directory.

View File

@@ -1,76 +0,0 @@
---
sidebar_position: 85
---
# Synology [Community]
:::note
This is a community contribution and not officially supported by the Immich team, but included here for convenience.
Community support can be found in the dedicated channel on the [Discord Server](https://discord.immich.app/).
**Please report app issues to the corresponding [Github Repository](https://github.com/truenas/charts/tree/master/community/immich).**
:::
Immich can easily be installed on a Synology NAS using Container Manager within DSM. If you have not installed Container Manager already, you can install it in the Packages Center. Refer to the [Container Manager docs](https://kb.synology.com/en-us/DSM/help/ContainerManager/docker_desc?version=7) for more information on using Container Manager.
## Step 1 - Download the required files
Create a directory of your choice (e.g. `./immich-app`) to house Immich. In general, it's a best practice to have all Docker-based applications running under the `./docker` directory, so in this case, your directory structure will look like `./docker/immich-app`.
Now create a `./postgres` and a `./library` directory as sub-directories of `./docker/immich-app`.
When you're all done, you should have the following:
- `./docker/immich-app/postgres`
- `./docker/immich-app/library`
Download [`docker-compose.yml`](https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml) and [`example.env`](https://github.com/immich-app/immich/releases/latest/download/example.env) to your computer. Upload the files to the `./docker/immich-app` directory.
## Step 2 - Populate the .env file with custom values
Follow [Step 2 in Docker Compose](./docker-compose#step-2---populate-the-env-file-with-custom-values) for instructions on customizing the `.env` file, and then return to this guide to continue.
## Step 3 - Create a new project in Container Manager
Open Container Manager, and select the "**Project**" action on the left navigation bar and then click "**Create**".
![Create Project](../../static/img/synology-container-manager-create-project.png)
In the settings of your new project, set "**Project name**" to a name you'll remember, such as _immich-app_. When setting the "**Path**", select the `./docker/immich-app` directory you created earlier. Doing so will prompt a message to use the existing `docker-compose.yml` already present in the directory for your project. Click "**OK**" to continue.
![Set Path](../../static/img/synology-container-manager-set-path.png)
The following screen will give you the option to further customize your `docker-compose.yml` file, giving you a warning regarding the `start_interval` property. Under the `healthcheck` heading, remove the `start_interval: 30s` completely and click "**Next**".
![start interval](../../static/img/synology-container-manager-customize-docker-compose.png)
Skip the section asking to set up a portal for Web Station, and then complete the wizard, which will build and start the containers for your project.
Once your containers are successfully running, navigate to the "**Container**" section of Container Manager, right-click on the "**immich-server**" container, and choose the "**Details**".
Scroll to the bottom of the "**Details**" section, and find the `IP Address` of the container, located in the `Network` section. Take note of the container's IP address as you will need it for **Step 4**.
![Container Details](../../static/img/synology-container-manager-container-details.png)
## Step 4 - Configure Firewall Settings
Once your project completes the build process, your containers will start. In order to be able to access Immich from your browser, you need to configure the firewall settings for your Synology NAS.
Open "**Control Panel**" on your Synology NAS, and select "**Security**". Navigate to "**Firewall**"
![Firewall rules](../../static/img/synology-firewall-rules.png)
Click "**Edit Rules**" and add the following firewall rules:
- Add a "**Source IP**" rule for the IP address of your container that you obtained in Step 3 above
- Add a "**Ports**" rule for the port specified in the `docker-compose.yml`, which should be `2283`
## Next Steps
Read the [Post Installation](/docs/install/post-install.mdx) steps or setup optional features below.
### Setting up optional features
- [External Libraries](/docs/features/libraries.md): Adding your existing photo library to Immich
- [Hardware Transcoding](/docs/features/hardware-transcoding.md): Speeding up video transcoding
- [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md): Speeding up various machine learning tasks in Immich

View File

@@ -41,7 +41,7 @@ className="border rounded-xl"
:::info Permissions
The **pgData** dataset must be owned by the user `netdata` (UID 999) for postgres to start. The other datasets must be owned by the user `root` (UID 0) or a group that includes the user `root` (UID 0) for immich to have the necessary permissions.
If the **library** dataset uses ACL it must have [ACL mode](https://www.truenas.com/docs/core/coretutorials/storage/pools/permissions/#access-control-lists) set to `Passthrough` if you plan on using a [storage template](/docs/administration/storage-template.mdx) and the dataset is configured for network sharing (its ACL type is set to `SMB/NFSv4`). When the template is applied and files need to be moved from **upload** to **library**, Immich performs `chmod` internally and needs to be allowed to execute the command. [More info.](https://github.com/immich-app/immich/pull/13017)
If the **library** dataset uses ACL it must have [ACL mode](https://www.truenas.com/docs/core/coretutorials/storage/pools/permissions/#access-control-lists) set to `Passthrough` if you plan on using a [storage template](/docs/administration/storage-template.mdx) and the dataset is configured for network sharing (its ACL type is set to `SMB/NFSv4`). When the template is applied and files need to be moved from **upload** to **library**, immich performs `chmod` internally and needs to be allowed to execute the command. [More info.](https://github.com/immich-app/immich/pull/13017)
:::
## Installing the Immich Application
@@ -160,10 +160,6 @@ The image above has example values.
### Additional Storage [(External Libraries)](/docs/features/libraries)
:::danger Advanced Users Only
This feature should only be used by advanced users. If this is your first time installing Immich, then DO NOT mount an external library until you have a working setup. Also, your mount path MUST be something unique and should NOT be your library or upload location or a Linux directory like `/lib`. The picture below shows a valid example.
:::
<img
src={require('./img/truenas10.webp').default}
width="40%"
@@ -172,7 +168,7 @@ className="border rounded-xl"
/>
You may configure [External Libraries](/docs/features/libraries) by mounting them using **Additional Storage**.
The **Mount Path** is the location you will need to copy and paste into the External Library settings within Immich.
The **Mount Path** is the loaction you will need to copy and paste into the External Library settings within Immich.
The **Host Path** is the location on the TrueNAS SCALE server where your external library is located.
<!-- A section for Labels would go here but I don't know what they do. -->
@@ -198,7 +194,7 @@ The **CPU** value was specified in a different format with a default of `4000m`
The **Memory** value was specified in a different format with a default of `8Gi` which is 8 GiB of RAM. The value was specified in bytes or a number with a measurement suffix. Examples: `129M`, `123Mi`, `1000000000`
:::
Enable **GPU Configuration** options if you have a GPU that you will use for [Hardware Transcoding](/docs/features/hardware-transcoding) and/or [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md). More info: [GPU Passthrough Docs for TrueNAS Apps](https://www.truenas.com/docs/truenasapps/#gpu-passthrough)
Enable **GPU Configuration** options if you have a GPU that you will use for [Hardware Transcoding](/docs/features/hardware-transcoding) and/or [Hardware-Accelerated Machine Learning](/docs/features/ml-hardware-acceleration.md). More info: [GPU Passtrough Docs for TrueNAS Apps](https://www.truenas.com/docs/truenasapps/#gpu-passthrough)
### Install

View File

@@ -72,7 +72,7 @@ alt="Select Plugins > Compose.Manager > Add New Stack > Label it Immich"
</ul>
</details>
5. Click "**Save Changes**", you will be prompted to edit stack UI labels, just leave this blank and click "**Ok**"
5. Click "**Save Changes**", you will be promoted to edit stack UI labels, just leave this blank and click "**Ok**"
6. Select the cog ⚙️ next to Immich, click "**Edit Stack**", then click "**Env File**"
7. Paste the entire contents of the [Immich example.env](https://github.com/immich-app/immich/releases/latest/download/example.env) file into the Unraid editor, then **before saving** edit the following:
@@ -111,7 +111,7 @@ alt="Go to Docker Tab and visit the address listed next to immich-web"
<details >
<summary>Using the FolderView plugin for organizing your Docker containers? Click me! Otherwise you're complete!</summary>
<p>If you are using the FolderView plugin go the Docker tab and select "<b>New Folder</b>".<br />Label it <i>"Immich"</i> and use this URL as the logo: https://raw.githubusercontent.com/immich-app/immich/main/design/immich-logo.png<br/>Then simply select all the Immich related containers before clicking "<b>Submit</b>"</p>
<p>If you are using the FolderView plugin go the Docker tab and select "<b>New Folder</b>".<br />Label it <i>"Immich"</i> and use this URL as the logo: https://raw.githubusercontent.com/immich-app/immich/main/design/immich-logo.webp<br/>Then simply select all the Immich related containers before clicking "<b>Submit</b>"</p>
<img
src={require('./img/unraid07.webp').default}
width="80%"

View File

@@ -0,0 +1,8 @@
---
sidebar_position: 2
---
# Comparison
If you're new here and came from other asset self-hosting alternatives you might want to look at a comparison between Immich and your current solution.
Here you can see a [comparison between the various OpenSource Photo Libraries](https://meichthys.github.io/foss_photo_libraries/) including Immich.

View File

@@ -1,3 +1,3 @@
Login to the mobile app with the server endpoint URL at `http://<machine-ip-address>:2283`
Login to the mobile app with the server endpoint URL at `http://<machine-ip-address>:2283/api`
<img src={require('./img/sign-in-phone.webp').default} width='50%' title='Mobile App Sign In' />

View File

@@ -110,9 +110,9 @@ const config = {
label: 'API',
},
{
href: 'https://immich.store',
to: '/blog',
position: 'right',
label: 'Merch',
label: 'Blog',
},
{
href: 'https://github.com/immich-app/immich',

6634
docs/package-lock.json generated

File diff suppressed because it is too large.

View File

@@ -16,8 +16,8 @@
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "~3.7.0",
"@docusaurus/preset-classic": "~3.7.0",
"@docusaurus/core": "~3.5.2",
"@docusaurus/preset-classic": "~3.5.2",
"@mdi/js": "^7.3.67",
"@mdi/react": "^1.6.1",
"@mdx-js/react": "^3.0.0",
@@ -35,9 +35,7 @@
"url": "^0.11.0"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "~3.7.0",
"@docusaurus/tsconfig": "^3.7.0",
"@docusaurus/types": "^3.7.0",
"@docusaurus/module-type-aliases": "~3.5.2",
"prettier": "^3.2.4",
"typescript": "^5.1.6"
},
@@ -57,6 +55,6 @@
"node": ">=20"
},
"volta": {
"node": "22.14.0"
"node": "22.12.0"
}
}

View File

@@ -53,11 +53,6 @@ const guides: CommunityGuidesProps[] = [
description: 'How to configure an existing fail2ban installation to block incorrect login attempts.',
url: 'https://github.com/immich-app/immich/discussions/3243#discussioncomment-6681948',
},
{
title: 'Immich remote access with NordVPN Meshnet',
description: 'Access Immich with an end-to-end encrypted connection.',
url: 'https://meshnet.nordvpn.com/how-to/remote-files-media-access/immich-remote-access',
},
];
function CommunityGuide({ title, description, url }: CommunityGuidesProps): JSX.Element {

View File

@@ -99,11 +99,6 @@ const projects: CommunityProjectProps[] = [
description: 'Downloads a configurable number of random photos based on people or album ID.',
url: 'https://github.com/jon6fingrs/immich-dl',
},
{
title: 'Immich Upload Optimizer',
description: 'Automatically optimize files uploaded to Immich in order to save storage space',
url: 'https://github.com/miguelangel-nubla/immich-upload-optimizer',
},
];
function CommunityProject({ title, description, url }: CommunityProjectProps): JSX.Element {

View File

@@ -24,13 +24,10 @@ export default function VersionSwitcher(): JSX.Element {
{ label: 'Next', url: 'https://main.preview.immich.app' },
{ label: 'Latest', url: 'https://immich.app' },
...archiveVersions,
].map(({ label, url }) => ({
label,
url: new URL(url),
}));
];
setVersions(allVersions);
const activeVersion = allVersions.find((version) => version.url.origin === window.location.origin);
const activeVersion = allVersions.find((version) => new URL(version.url).origin === window.location.origin);
if (activeVersion) {
setLabel(activeVersion.label);
}
@@ -47,12 +44,12 @@ export default function VersionSwitcher(): JSX.Element {
return (
versions.length > 0 && (
<DropdownNavbarItem
className="version-switcher-34ab39"
className="navbar__item"
label={label}
mobile={windowSize === 'mobile'}
items={versions.map(({ label, url }) => ({
label,
to: new URL(location.pathname + location.search + location.hash, url).href,
to: url,
target: '_self',
}))}
/>

View File

@@ -75,11 +75,6 @@ div[class^='announcementBar_'] {
font-weight: 500;
}
/* workaround for version switcher PR 15894 */
div[class*='navbar__items'] > li:has(a[class*='version-switcher-34ab39']) {
display: none;
}
code {
font-weight: 600;
}

View File

@@ -50,13 +50,6 @@ function HomepageHeader() {
>
Demo
</Link>
<Link
className="flex place-items-center place-content-center py-3 px-8 border bg-immich-primary/10 dark:bg-gray-300 rounded-xl hover:no-underline text-immich-primary dark:text-immich-dark-bg font-bold uppercase"
to="https://immich.store"
>
Buy Merch
</Link>
</div>
<div className="my-12 flex gap-1 font-medium place-items-center place-content-center text-immich-primary dark:text-immich-dark-primary">
@@ -80,9 +73,9 @@ function HomepageHeader() {
/>
<div>
<p className="font-bold text-2xl md:text-5xl ">Download the mobile app</p>
<p className="font-bold text-2xl md:text-5xl ">Download mobile app</p>
<p className="text-lg">
Download the Immich app and start backing up your photos and videos securely to your own server
Download Immich app and start backing up your photos and videos securely to your own server
</p>
</div>
<div className="flex flex-col sm:flex-row place-items-center place-content-center mt-4 gap-1">

View File

@@ -242,13 +242,6 @@ const roadmap: Item[] = [
];
const milestones: Item[] = [
{
icon: mdiStar,
iconColor: 'gold',
title: '60,000 Stars',
description: 'Reached 60K Stars on GitHub!',
getDateLabel: withLanguage(new Date(2025, 2, 4)),
},
withRelease({
icon: mdiLinkEdit,
iconColor: 'crimson',

View File

@@ -1,32 +1,32 @@
/docs /docs/overview/introduction 307
/docs/mobile-app-beta-program /docs/features/mobile-app 307
/docs/contribution-guidelines /docs/overview/support-the-project#contributing 307
/docs/install /docs/install/docker-compose 307
/docs/installation/one-step-installation /docs/install/script 307
/docs/installation/portainer-installation /docs/install/portainer 307
/docs/installation/recommended-installation /docs/install/docker-compose 307
/docs/installation/unraid /docs/install/unraid 307
/docs/installation/requirements /docs/install/requirements 307
/docs/overview/logo-meaning /docs/overview/logo 307
/docs/overview/technology-stack /docs/developer/architecture 307
/docs/usage/automatic-backup /docs/features/automatic-backup 307
/docs/usage/bulk-upload /docs/features/command-line-interface 307
/docs/features/bulk-upload /docs/features/command-line-interface 307
/docs/usage/oauth /docs/administration/oauth 307
/docs/usage/post-installation /docs/install/post-install 307
/docs/usage/update /docs/install/docker-compose#step-4---upgrading 307
/docs/usage/server-commands /docs/administration/server-commands 307
/docs/features/jobs /docs/administration/jobs 307
/docs/features/oauth /docs/administration/oauth 307
/docs/features/password-login /docs/administration/password-login 307
/docs/features/server-commands /docs/administration/server-commands 307
/docs/features/storage-template /docs/administration/storage-template 307
/docs/features/user-management /docs/administration/user-management 307
/docs/developer/contributing /docs/developer/pr-checklist 307
/docs/guides/machine-learning /docs/guides/remote-machine-learning 307
/docs/administration/password-login /docs/administration/system-settings 307
/docs/features/search /docs/features/searching 307
/docs/features/smart-search /docs/features/searching 307
/docs/guides/api-album-sync /docs/community-projects 307
/docs/guides/remove-offline-files /docs/community-projects 307
/milestones /roadmap 307
/docs /docs/overview/introduction 301
/docs/mobile-app-beta-program /docs/features/mobile-app 301
/docs/contribution-guidelines /docs/overview/support-the-project#contributing 301
/docs/install /docs/install/docker-compose 301
/docs/installation/one-step-installation /docs/install/script 301
/docs/installation/portainer-installation /docs/install/portainer 301
/docs/installation/recommended-installation /docs/install/docker-compose 301
/docs/installation/unraid /docs/install/unraid 301
/docs/installation/requirements /docs/install/requirements 301
/docs/overview/logo-meaning /docs/overview/logo 301
/docs/overview/technology-stack /docs/developer/architecture 301
/docs/usage/automatic-backup /docs/features/automatic-backup 301
/docs/usage/bulk-upload /docs/features/command-line-interface 301
/docs/features/bulk-upload /docs/features/command-line-interface 301
/docs/usage/oauth /docs/administration/oauth 301
/docs/usage/post-installation /docs/install/post-install 301
/docs/usage/update /docs/install/docker-compose#step-4---upgrading 301
/docs/usage/server-commands /docs/administration/server-commands 301
/docs/features/jobs /docs/administration/jobs 301
/docs/features/oauth /docs/administration/oauth 301
/docs/features/password-login /docs/administration/password-login 301
/docs/features/server-commands /docs/administration/server-commands 301
/docs/features/storage-template /docs/administration/storage-template 301
/docs/features/user-management /docs/administration/user-management 301
/docs/developer/contributing /docs/developer/pr-checklist 301
/docs/guides/machine-learning /docs/guides/remote-machine-learning 301
/docs/administration/password-login /docs/administration/system-settings 301
/docs/features/search /docs/features/searching 301
/docs/features/smart-search /docs/features/searching 301
/docs/guides/api-album-sync /docs/community-projects 301
/docs/guides/remove-offline-files /docs/community-projects 301
/milestones /roadmap 301

View File

@@ -1,60 +1,4 @@
[
{
"label": "v1.129.0",
"url": "https://v1.129.0.archive.immich.app"
},
{
"label": "v1.128.0",
"url": "https://v1.128.0.archive.immich.app"
},
{
"label": "v1.127.0",
"url": "https://v1.127.0.archive.immich.app"
},
{
"label": "v1.126.1",
"url": "https://v1.126.1.archive.immich.app"
},
{
"label": "v1.126.0",
"url": "https://v1.126.0.archive.immich.app"
},
{
"label": "v1.125.7",
"url": "https://v1.125.7.archive.immich.app"
},
{
"label": "v1.125.6",
"url": "https://v1.125.6.archive.immich.app"
},
{
"label": "v1.125.5",
"url": "https://v1.125.5.archive.immich.app"
},
{
"label": "v1.125.3",
"url": "https://v1.125.3.archive.immich.app"
},
{
"label": "v1.125.2",
"url": "https://v1.125.2.archive.immich.app"
},
{
"label": "v1.125.1",
"url": "https://v1.125.1.archive.immich.app"
},
{
"label": "v1.124.2",
"url": "https://v1.124.2.archive.immich.app"
},
{
"label": "v1.124.1",
"url": "https://v1.124.1.archive.immich.app"
},
{
"label": "v1.124.0",
"url": "https://v1.124.0.archive.immich.app"
},
{
"label": "v1.123.0",
"url": "https://v1.123.0.archive.immich.app"
@@ -205,46 +149,46 @@
},
{
"label": "v1.105.1",
"url": "https://v1.105.1.archive.immich.app"
"url": "https://v1.105.1.archive.immich.app/"
},
{
"label": "v1.105.0",
"url": "https://v1.105.0.archive.immich.app"
"url": "https://v1.105.0.archive.immich.app/"
},
{
"label": "v1.104.0",
"url": "https://v1.104.0.archive.immich.app"
"url": "https://v1.104.0.archive.immich.app/"
},
{
"label": "v1.103.1",
"url": "https://v1.103.1.archive.immich.app"
"url": "https://v1.103.1.archive.immich.app/"
},
{
"label": "v1.103.0",
"url": "https://v1.103.0.archive.immich.app"
"url": "https://v1.103.0.archive.immich.app/"
},
{
"label": "v1.102.3",
"url": "https://v1.102.3.archive.immich.app"
"url": "https://v1.102.3.archive.immich.app/"
},
{
"label": "v1.102.2",
"url": "https://v1.102.2.archive.immich.app"
"url": "https://v1.102.2.archive.immich.app/"
},
{
"label": "v1.102.1",
"url": "https://v1.102.1.archive.immich.app"
"url": "https://v1.102.1.archive.immich.app/"
},
{
"label": "v1.102.0",
"url": "https://v1.102.0.archive.immich.app"
"url": "https://v1.102.0.archive.immich.app/"
},
{
"label": "v1.101.0",
"url": "https://v1.101.0.archive.immich.app"
"url": "https://v1.101.0.archive.immich.app/"
},
{
"label": "v1.100.0",
"url": "https://v1.100.0.archive.immich.app"
"url": "https://v1.100.0.archive.immich.app/"
}
]

Binary image files not shown (5 files; previous sizes: 164 KiB, 112 KiB, 50 KiB, 49 KiB, 81 KiB).

View File

@@ -5,7 +5,7 @@ module.exports = {
preflight: false, // disable Tailwind's reset
},
content: ['./src/**/*.{js,jsx,ts,tsx}', './{docs,blog}/**/*.{md,mdx}'], // my markdown stuff is in ../docs, not /src
darkMode: ['class', '[data-theme="dark"]'], // hooks into docusaurus' dark mode settings
darkMode: ['class', '[data-theme="dark"]'], // hooks into docusaurus' dark mode settigns
theme: {
extend: {
colors: {

View File

@@ -1,8 +1,9 @@
{
// This file is not used in compilation. It is here just for a nice editor experience.
"extends": "@docusaurus/tsconfig",
"extends": "@tsconfig/docusaurus/tsconfig.json",
"compilerOptions": {
"baseUrl": "."
"baseUrl": ".",
"module": "Node16"
}
}

View File

@@ -1 +1 @@
22.14.0
22.12.0

View File

@@ -34,10 +34,10 @@ services:
- 2285:2285
redis:
image: redis:6.2-alpine@sha256:148bb5411c184abd288d9aaed139c98123eeb8824c5d3fce03cf721db58066d8
image: redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
database:
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:739cdd626151ff1f796dc95a6591b55a714f341c737e27f045019ceabf8e8c52
image: tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
command: -c fsync=off -c shared_preload_libraries=vectors.so
environment:
POSTGRES_PASSWORD: postgres

1211
e2e/package-lock.json generated

File diff suppressed because it is too large.

View File

@@ -1,6 +1,6 @@
{
"name": "immich-e2e",
"version": "1.129.0",
"version": "1.123.0",
"description": "",
"main": "index.js",
"type": "module",
@@ -25,20 +25,20 @@
"@immich/sdk": "file:../open-api/typescript-sdk",
"@playwright/test": "^1.44.1",
"@types/luxon": "^3.4.2",
"@types/node": "^22.13.5",
"@types/node": "^22.10.2",
"@types/oidc-provider": "^8.5.1",
"@types/pg": "^8.11.0",
"@types/pngjs": "^6.0.4",
"@types/supertest": "^6.0.2",
"@typescript-eslint/eslint-plugin": "^8.15.0",
"@typescript-eslint/parser": "^8.15.0",
"@vitest/coverage-v8": "^3.0.0",
"@vitest/coverage-v8": "^2.0.5",
"eslint": "^9.14.0",
"eslint-config-prettier": "^10.0.0",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^56.0.1",
"exiftool-vendored": "^28.3.1",
"globals": "^16.0.0",
"globals": "^15.9.0",
"jose": "^5.6.3",
"luxon": "^3.4.4",
"oidc-provider": "^8.5.1",
@@ -50,9 +50,9 @@
"supertest": "^7.0.0",
"typescript": "^5.3.3",
"utimes": "^5.2.1",
"vitest": "^3.0.0"
"vitest": "^2.0.5"
},
"volta": {
"node": "22.14.0"
"node": "22.12.0"
}
}

View File

@@ -22,92 +22,79 @@ const user1NotShared = 'user1NotShared';
const user2SharedUser = 'user2SharedUser';
const user2SharedLink = 'user2SharedLink';
const user2NotShared = 'user2NotShared';
const user4DeletedAsset = 'user4DeletedAsset';
const user4Empty = 'user4Empty';
describe('/albums', () => {
let admin: LoginResponseDto;
let user1: LoginResponseDto;
let user1Asset1: AssetMediaResponseDto;
let user1Asset2: AssetMediaResponseDto;
let user4Asset1: AssetMediaResponseDto;
let user1Albums: AlbumResponseDto[];
let user2: LoginResponseDto;
let user2Albums: AlbumResponseDto[];
let deletedAssetAlbum: AlbumResponseDto;
let user3: LoginResponseDto; // deleted
let user4: LoginResponseDto;
beforeAll(async () => {
await utils.resetDatabase();
admin = await utils.adminSetup();
[user1, user2, user3, user4] = await Promise.all([
[user1, user2, user3] = await Promise.all([
utils.userSetup(admin.accessToken, createUserDto.user1),
utils.userSetup(admin.accessToken, createUserDto.user2),
utils.userSetup(admin.accessToken, createUserDto.user3),
utils.userSetup(admin.accessToken, createUserDto.user4),
]);
[user1Asset1, user1Asset2, user4Asset1] = await Promise.all([
[user1Asset1, user1Asset2] = await Promise.all([
utils.createAsset(user1.accessToken, { isFavorite: true }),
utils.createAsset(user1.accessToken),
utils.createAsset(user1.accessToken),
]);
[user1Albums, user2Albums, deletedAssetAlbum] = await Promise.all([
Promise.all([
utils.createAlbum(user1.accessToken, {
albumName: user1SharedEditorUser,
albumUsers: [
{ userId: admin.userId, role: AlbumUserRole.Editor },
{ userId: user2.userId, role: AlbumUserRole.Editor },
],
assetIds: [user1Asset1.id],
}),
utils.createAlbum(user1.accessToken, {
albumName: user1SharedLink,
assetIds: [user1Asset1.id],
}),
utils.createAlbum(user1.accessToken, {
albumName: user1NotShared,
assetIds: [user1Asset1.id, user1Asset2.id],
}),
utils.createAlbum(user1.accessToken, {
albumName: user1SharedViewerUser,
albumUsers: [{ userId: user2.userId, role: AlbumUserRole.Viewer }],
assetIds: [user1Asset1.id],
}),
]),
Promise.all([
utils.createAlbum(user2.accessToken, {
albumName: user2SharedUser,
albumUsers: [
{ userId: user1.userId, role: AlbumUserRole.Editor },
{ userId: user3.userId, role: AlbumUserRole.Editor },
],
}),
utils.createAlbum(user2.accessToken, { albumName: user2SharedLink }),
utils.createAlbum(user2.accessToken, { albumName: user2NotShared }),
]),
utils.createAlbum(user4.accessToken, { albumName: user4DeletedAsset }),
utils.createAlbum(user4.accessToken, { albumName: user4Empty }),
utils.createAlbum(user3.accessToken, {
albumName: 'Deleted',
albumUsers: [{ userId: user1.userId, role: AlbumUserRole.Editor }],
user1Albums = await Promise.all([
utils.createAlbum(user1.accessToken, {
albumName: user1SharedEditorUser,
albumUsers: [{ userId: user2.userId, role: AlbumUserRole.Editor }],
assetIds: [user1Asset1.id],
}),
utils.createAlbum(user1.accessToken, {
albumName: user1SharedLink,
assetIds: [user1Asset1.id],
}),
utils.createAlbum(user1.accessToken, {
albumName: user1NotShared,
assetIds: [user1Asset1.id, user1Asset2.id],
}),
utils.createAlbum(user1.accessToken, {
albumName: user1SharedViewerUser,
albumUsers: [{ userId: user2.userId, role: AlbumUserRole.Viewer }],
assetIds: [user1Asset1.id],
}),
]);
user2Albums = await Promise.all([
utils.createAlbum(user2.accessToken, {
albumName: user2SharedUser,
albumUsers: [
{ userId: user1.userId, role: AlbumUserRole.Editor },
{ userId: user3.userId, role: AlbumUserRole.Editor },
],
}),
utils.createAlbum(user2.accessToken, { albumName: user2SharedLink }),
utils.createAlbum(user2.accessToken, { albumName: user2NotShared }),
]);
await utils.createAlbum(user3.accessToken, {
albumName: 'Deleted',
albumUsers: [{ userId: user1.userId, role: AlbumUserRole.Editor }],
});
await addAssetsToAlbum(
{ id: user2Albums[0].id, bulkIdsDto: { ids: [user1Asset1.id, user1Asset2.id] } },
{ headers: asBearerAuth(user1.accessToken) },
);
user2Albums[0] = await getAlbumInfo({ id: user2Albums[0].id }, { headers: asBearerAuth(user2.accessToken) });
await Promise.all([
addAssetsToAlbum(
{ id: user2Albums[0].id, bulkIdsDto: { ids: [user1Asset1.id, user1Asset2.id] } },
{ headers: asBearerAuth(user1.accessToken) },
),
addAssetsToAlbum(
{ id: deletedAssetAlbum.id, bulkIdsDto: { ids: [user4Asset1.id] } },
{ headers: asBearerAuth(user4.accessToken) },
),
// add shared link to user1SharedLink album
utils.createSharedLink(user1.accessToken, {
type: SharedLinkType.Album,
@@ -120,11 +107,7 @@ describe('/albums', () => {
}),
]);
[user2Albums[0]] = await Promise.all([
getAlbumInfo({ id: user2Albums[0].id }, { headers: asBearerAuth(user2.accessToken) }),
deleteUserAdmin({ id: user3.userId, userAdminDeleteDto: {} }, { headers: asBearerAuth(admin.accessToken) }),
utils.deleteAssets(user1.accessToken, [user4Asset1.id]),
]);
await deleteUserAdmin({ id: user3.userId, userAdminDeleteDto: {} }, { headers: asBearerAuth(admin.accessToken) });
});
describe('GET /albums', () => {
@@ -159,10 +142,6 @@ describe('/albums', () => {
...user1Albums[0],
assets: [expect.objectContaining({ isFavorite: false })],
lastModifiedAssetTimestamp: expect.any(String),
startDate: expect.any(String),
endDate: expect.any(String),
shared: true,
albumUsers: expect.any(Array),
});
});
@@ -301,25 +280,6 @@ describe('/albums', () => {
expect(status).toBe(200);
expect(body).toHaveLength(5);
});
it('should return empty albums and albums where all assets are deleted', async () => {
const { status, body } = await request(app).get('/albums').set('Authorization', `Bearer ${user4.accessToken}`);
expect(status).toBe(200);
expect(body).toEqual(
expect.arrayContaining([
expect.objectContaining({
ownerId: user4.userId,
albumName: user4DeletedAsset,
shared: false,
}),
expect.objectContaining({
ownerId: user4.userId,
albumName: user4Empty,
shared: false,
}),
]),
);
});
});
describe('GET /albums/:id', () => {
@@ -339,10 +299,6 @@ describe('/albums', () => {
...user1Albums[0],
assets: [expect.objectContaining({ id: user1Albums[0].assets[0].id })],
lastModifiedAssetTimestamp: expect.any(String),
startDate: expect.any(String),
endDate: expect.any(String),
albumUsers: expect.any(Array),
shared: true,
});
});
@@ -374,10 +330,6 @@ describe('/albums', () => {
...user1Albums[0],
assets: [expect.objectContaining({ id: user1Albums[0].assets[0].id })],
lastModifiedAssetTimestamp: expect.any(String),
startDate: expect.any(String),
endDate: expect.any(String),
albumUsers: expect.any(Array),
shared: true,
});
});
@@ -392,30 +344,6 @@ describe('/albums', () => {
assets: [],
assetCount: 1,
lastModifiedAssetTimestamp: expect.any(String),
endDate: expect.any(String),
startDate: expect.any(String),
albumUsers: expect.any(Array),
shared: true,
});
});
it('should not count trashed assets', async () => {
await utils.deleteAssets(user1.accessToken, [user1Asset2.id]);
const { status, body } = await request(app)
.get(`/albums/${user2Albums[0].id}?withoutAssets=true`)
.set('Authorization', `Bearer ${user1.accessToken}`);
expect(status).toBe(200);
expect(body).toEqual({
...user2Albums[0],
assets: [],
assetCount: 1,
lastModifiedAssetTimestamp: expect.any(String),
endDate: expect.any(String),
startDate: expect.any(String),
albumUsers: expect.any(Array),
shared: true,
});
});
});

View File

@@ -3,10 +3,11 @@ import {
AssetMediaStatus,
AssetResponseDto,
AssetTypeEnum,
getAssetInfo,
getMyUser,
LoginResponseDto,
SharedLinkType,
getAssetInfo,
getConfig,
getMyUser,
updateConfig,
} from '@immich/sdk';
import { exiftool } from 'exiftool-vendored';
@@ -18,7 +19,7 @@ import { Socket } from 'socket.io-client';
import { createUserDto, uuidDto } from 'src/fixtures';
import { makeRandomImage } from 'src/generators';
import { errorDto } from 'src/responses';
import { app, asBearerAuth, tempDir, TEN_TIMES, testAssetDir, utils } from 'src/utils';
import { app, asBearerAuth, tempDir, testAssetDir, utils } from 'src/utils';
import request from 'supertest';
import { afterAll, beforeAll, describe, expect, it } from 'vitest';
@@ -40,10 +41,14 @@ const makeUploadDto = (options?: { omit: string }): Record<string, any> => {
return dto;
};
const TEN_TIMES = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
const locationAssetFilepath = `${testAssetDir}/metadata/gps-position/thompson-springs.jpg`;
const ratingAssetFilepath = `${testAssetDir}/metadata/rating/mongolels.jpg`;
const facesAssetFilepath = `${testAssetDir}/metadata/faces/portrait.jpg`;
const getSystemConfig = (accessToken: string) => getConfig({ headers: asBearerAuth(accessToken) });
const readTags = async (bytes: Buffer, filename: string) => {
const filepath = join(tempDir, filename);
await writeFile(filepath, bytes);
@@ -225,7 +230,7 @@ describe('/asset', () => {
});
it('should get the asset faces', async () => {
const config = await utils.getSystemConfig(admin.accessToken);
const config = await getSystemConfig(admin.accessToken);
config.metadata.faces.import = true;
await updateConfig({ systemConfigDto: config }, { headers: asBearerAuth(admin.accessToken) });
@@ -698,20 +703,6 @@ describe('/asset', () => {
expect(status).toEqual(200);
});
it('should set the negative rating', async () => {
const { status, body } = await request(app)
.put(`/assets/${user1Assets[0].id}`)
.set('Authorization', `Bearer ${user1.accessToken}`)
.send({ rating: -1 });
expect(body).toMatchObject({
id: user1Assets[0].id,
exifInfo: expect.objectContaining({
rating: -1,
}),
});
expect(status).toEqual(200);
});
it('should reject invalid rating', async () => {
for (const test of [{ rating: 7 }, { rating: 3.5 }, { rating: null }]) {
const { status, body } = await request(app)
@@ -775,7 +766,7 @@ describe('/asset', () => {
expect(body).toEqual(errorDto.badRequest('Not found or no asset.delete access'));
});
it('should move an asset to trash', async () => {
it('should move an asset to the trash', async () => {
const { id: assetId } = await utils.createAsset(admin.accessToken);
const before = await utils.getAssetInfo(admin.accessToken, assetId);
@@ -791,38 +782,6 @@ describe('/asset', () => {
expect(after.isTrashed).toBe(true);
});
it('should permanently delete an asset from trash', async () => {
const { id: assetId } = await utils.createAsset(admin.accessToken);
{
const { status } = await request(app)
.delete('/assets')
.send({ ids: [assetId] })
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(status).toBe(204);
}
const trashed = await utils.getAssetInfo(admin.accessToken, assetId);
expect(trashed.isTrashed).toBe(true);
{
const { status } = await request(app)
.delete('/assets')
.send({ ids: [assetId], force: true })
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(status).toBe(204);
}
await utils.waitForWebsocketEvent({ event: 'assetDelete', id: assetId });
{
const { status } = await request(app)
.get(`/assets/${assetId}`)
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(status).toBe(400);
}
});
it('should clean up live photos', async () => {
const { id: motionId } = await utils.createAsset(admin.accessToken, {
assetData: { filename: 'test.mp4', bytes: makeRandomImage() },

View File

@@ -1,225 +0,0 @@
import { JobCommand, JobName, LoginResponseDto, updateConfig } from '@immich/sdk';
import { cpSync, rmSync } from 'node:fs';
import { readFile } from 'node:fs/promises';
import { basename } from 'node:path';
import { errorDto } from 'src/responses';
import { app, asBearerAuth, testAssetDir, utils } from 'src/utils';
import request from 'supertest';
import { afterEach, beforeAll, describe, expect, it } from 'vitest';
describe('/jobs', () => {
let admin: LoginResponseDto;
beforeAll(async () => {
await utils.resetDatabase();
admin = await utils.adminSetup({ onboarding: false });
});
describe('PUT /jobs', () => {
afterEach(async () => {
await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Resume,
force: false,
});
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Resume,
force: false,
});
await utils.jobCommand(admin.accessToken, JobName.FaceDetection, {
command: JobCommand.Resume,
force: false,
});
await utils.jobCommand(admin.accessToken, JobName.SmartSearch, {
command: JobCommand.Resume,
force: false,
});
await utils.jobCommand(admin.accessToken, JobName.DuplicateDetection, {
command: JobCommand.Resume,
force: false,
});
const config = await utils.getSystemConfig(admin.accessToken);
config.machineLearning.duplicateDetection.enabled = false;
config.machineLearning.enabled = false;
config.metadata.faces.import = false;
config.machineLearning.clip.enabled = false;
await updateConfig({ systemConfigDto: config }, { headers: asBearerAuth(admin.accessToken) });
});
it('should require authentication', async () => {
const { status, body } = await request(app).put('/jobs/metadataExtraction');
expect(status).toBe(401);
expect(body).toEqual(errorDto.unauthorized);
});
it('should queue metadata extraction for missing assets', async () => {
const path = `${testAssetDir}/formats/raw/Nikon/D700/philadelphia.nef`;
await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Pause,
force: false,
});
const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
});
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
{
const asset = await utils.getAssetInfo(admin.accessToken, id);
expect(asset.exifInfo).toBeDefined();
expect(asset.exifInfo?.make).toBeNull();
}
await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Empty,
force: false,
});
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Resume,
force: false,
});
await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Start,
force: false,
});
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
{
const asset = await utils.getAssetInfo(admin.accessToken, id);
expect(asset.exifInfo).toBeDefined();
expect(asset.exifInfo?.make).toBe('NIKON CORPORATION');
}
});
it('should not re-extract metadata for existing assets', async () => {
const path = `${testAssetDir}/temp/metadata/asset.jpg`;
cpSync(`${testAssetDir}/formats/raw/Nikon/D700/philadelphia.nef`, path);
const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
});
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
{
const asset = await utils.getAssetInfo(admin.accessToken, id);
expect(asset.exifInfo).toBeDefined();
expect(asset.exifInfo?.model).toBe('NIKON D700');
}
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, path);
await utils.jobCommand(admin.accessToken, JobName.MetadataExtraction, {
command: JobCommand.Start,
force: false,
});
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
{
const asset = await utils.getAssetInfo(admin.accessToken, id);
expect(asset.exifInfo).toBeDefined();
expect(asset.exifInfo?.model).toBe('NIKON D700');
}
rmSync(path);
});
it('should queue thumbnail extraction for assets missing thumbs', async () => {
const path = `${testAssetDir}/albums/nature/tanners_ridge.jpg`;
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Pause,
force: false,
});
const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
});
await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);
const assetBefore = await utils.getAssetInfo(admin.accessToken, id);
expect(assetBefore.thumbhash).toBeNull();
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Empty,
force: false,
});
await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Resume,
force: false,
});
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Start,
force: false,
});
await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);
const assetAfter = await utils.getAssetInfo(admin.accessToken, id);
expect(assetAfter.thumbhash).not.toBeNull();
});
it('should not reload existing thumbnail when running thumb job for missing assets', async () => {
const path = `${testAssetDir}/temp/thumbs/asset1.jpg`;
cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, path);
const { id } = await utils.createAsset(admin.accessToken, {
assetData: { bytes: await readFile(path), filename: basename(path) },
});
await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);
const assetBefore = await utils.getAssetInfo(admin.accessToken, id);
cpSync(`${testAssetDir}/albums/nature/notocactus_minimus.jpg`, path);
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Resume,
force: false,
});
// This runs the missing thumbnail job
await utils.jobCommand(admin.accessToken, JobName.ThumbnailGeneration, {
command: JobCommand.Start,
force: false,
});
await utils.waitForQueueFinish(admin.accessToken, JobName.MetadataExtraction);
await utils.waitForQueueFinish(admin.accessToken, JobName.ThumbnailGeneration);
const assetAfter = await utils.getAssetInfo(admin.accessToken, id);
// Asset 1 thumbnail should be untouched since its thumb should not have been reloaded, even though the file was changed
expect(assetAfter.thumbhash).toEqual(assetBefore.thumbhash);
rmSync(path);
});
});
});

View File

@@ -1,4 +1,4 @@
import { LibraryResponseDto, LoginResponseDto, getAllLibraries } from '@immich/sdk';
import { LibraryResponseDto, LoginResponseDto, getAllLibraries, scanLibrary } from '@immich/sdk';
import { cpSync, existsSync, rmSync, unlinkSync } from 'node:fs';
import { Socket } from 'socket.io-client';
import { userDto, uuidDto } from 'src/fixtures';
@@ -8,6 +8,8 @@ import request from 'supertest';
import { utimes } from 'utimes';
import { afterAll, beforeAll, beforeEach, describe, expect, it } from 'vitest';
const scan = async (accessToken: string, id: string) => scanLibrary({ id }, { headers: asBearerAuth(accessToken) });
describe('/libraries', () => {
let admin: LoginResponseDto;
let user: LoginResponseDto;
@@ -296,35 +298,13 @@ describe('/libraries', () => {
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets } = await utils.searchAssets(admin.accessToken, {
originalPath: `${testAssetDirInternal}/temp/directoryA/assetA.png`,
libraryId: library.id,
});
expect(assets.count).toBe(1);
});
it('should process metadata and thumbnails for external asset', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/directoryA`],
});
await utils.scan(admin.accessToken, library.id);
const { assets } = await utils.searchAssets(admin.accessToken, {
originalPath: `${testAssetDirInternal}/temp/directoryA/assetA.png`,
libraryId: library.id,
});
expect(assets.count).toBe(1);
const asset = assets.items[0];
expect(asset.exifInfo).not.toBe(null);
expect(asset.exifInfo?.dateTimeOriginal).not.toBe(null);
expect(asset.thumbhash).not.toBe(null);
});
it('should scan external library with exclusion pattern', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
@@ -332,7 +312,13 @@ describe('/libraries', () => {
exclusionPatterns: ['**/directoryA'],
});
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -346,7 +332,13 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/directoryA`, `${testAssetDirInternal}/temp/directoryB`],
});
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -365,7 +357,13 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/folder, a/assetA.png`);
utils.createImageFile(`${testAssetDir}/temp/folder, b/assetB.png`);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -387,7 +385,13 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/folder{ a/assetA.png`);
utils.createImageFile(`${testAssetDir}/temp/folder} b/assetB.png`);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -439,7 +443,13 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/folder${char}1/asset1.png`);
utils.createImageFile(`${testAssetDir}/temp/folder${char}2/asset2.png`);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -463,12 +473,23 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
@@ -499,12 +520,21 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
@@ -526,47 +556,6 @@ describe('/libraries', () => {
utils.removeImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
});
it('should not reimport a modified file more than once', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/reimport`],
});
utils.createImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
cpSync(`${testAssetDir}/albums/nature/tanners_ridge.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
cpSync(`${testAssetDir}/albums/nature/el_torcal_rocks.jpg`, `${testAssetDir}/temp/reimport/asset.jpg`);
await utimes(`${testAssetDir}/temp/reimport/asset.jpg`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
});
expect(assets.count).toEqual(1);
const asset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(asset).toEqual(
expect.objectContaining({
originalFileName: 'asset.jpg',
exifInfo: expect.objectContaining({
model: 'NIKON D750',
}),
}),
);
utils.removeImageFile(`${testAssetDir}/temp/reimport/asset.jpg`);
});
it('should set an asset offline if its file is missing', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
@@ -575,14 +564,21 @@ describe('/libraries', () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);
utils.removeImageFile(`${testAssetDir}/temp/offline/offline.png`);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const trashedAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(trashedAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
@@ -592,7 +588,7 @@ describe('/libraries', () => {
expect(newAssets.items).toEqual([]);
});
it('should set an asset offline if its file is not in any import path', async () => {
it('should set an asset offline its file is not in any import path', async () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
const library = await utils.createLibrary(admin.accessToken, {
@@ -600,18 +596,26 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp/offline`],
});
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);
utils.createDirectory(`${testAssetDir}/temp/another-path/`);
await utils.updateLibrary(admin.accessToken, library.id, {
importPaths: [`${testAssetDirInternal}/temp/another-path/`],
});
await request(app)
.put(`/libraries/${library.id}`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ importPaths: [`${testAssetDirInternal}/temp/another-path/`] });
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const trashedAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(trashedAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
@@ -631,7 +635,8 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp`],
});
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, {
libraryId: library.id,
@@ -639,9 +644,13 @@ describe('/libraries', () => {
});
expect(assets.count).toBe(1);
await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/directoryB/**'] });
await request(app)
.put(`/libraries/${library.id}`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ exclusionPatterns: ['**/directoryB/**'] });
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const trashedAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(trashedAsset.isTrashed).toBe(true);
@@ -657,18 +666,25 @@ describe('/libraries', () => {
]);
});
it('should not set an asset offline if its file exists, is in an import path, and not covered by an exclusion pattern', async () => {
it('should not trash an online asset', async () => {
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp`],
});
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets: assetsBefore } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assetsBefore.count).toBeGreaterThan(1);
await utils.scan(admin.accessToken, library.id);
const { status } = await request(app)
.post(`/libraries/${library.id}/scan`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send();
expect(status).toBe(204);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -685,7 +701,11 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -708,7 +728,10 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.nef.xmp`);
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -732,7 +755,10 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/metadata/xmp/dates/2010.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -756,13 +782,19 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
cpSync(`${testAssetDir}/metadata/xmp/dates/2010.xmp`, `${testAssetDir}/temp/xmp/glarus.nef.xmp`);
unlinkSync(`${testAssetDir}/temp/xmp/glarus.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -785,12 +817,18 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -813,12 +851,18 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
cpSync(`${testAssetDir}/metadata/xmp/dates/2000.xmp`, `${testAssetDir}/temp/xmp/glarus.nef.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -842,13 +886,19 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
cpSync(`${testAssetDir}/metadata/xmp/dates/2010.xmp`, `${testAssetDir}/temp/xmp/glarus.xmp`);
unlinkSync(`${testAssetDir}/temp/xmp/glarus.nef.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -872,12 +922,18 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
unlinkSync(`${testAssetDir}/temp/xmp/glarus.nef.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
@@ -901,12 +957,18 @@ describe('/libraries', () => {
cpSync(`${testAssetDir}/formats/raw/Nikon/D80/glarus.nef`, `${testAssetDir}/temp/xmp/glarus.nef`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_000);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
unlinkSync(`${testAssetDir}/temp/xmp/glarus.xmp`);
await utimes(`${testAssetDir}/temp/xmp/glarus.nef`, 447_775_200_001);
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
await utils.waitForQueueFinish(admin.accessToken, 'sidecar');
await utils.waitForQueueFinish(admin.accessToken, 'metadataExtraction');
const { assets: newAssets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
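Every sidecar case above repeats the same sequence: trigger a scan, then drain the library, sidecar, and metadataExtraction queues before re-querying the library's assets. A minimal sketch of that sequence as a helper — purely illustrative, no such helper exists in this suite:

// Hypothetical helper mirroring the scan-then-wait sequence repeated in the sidecar tests above.
const scanAndWait = async (accessToken: string, libraryId: string) => {
  await utils.scan(accessToken, libraryId);
  for (const queue of ['library', 'sidecar', 'metadataExtraction'] as const) {
    await utils.waitForQueueFinish(accessToken, queue);
  }
};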
@@ -920,144 +982,6 @@ describe('/libraries', () => {
rmSync(`${testAssetDir}/temp/xmp`, { recursive: true, force: true });
});
});
it('should set an offline asset to online if its file exists, is in an import path, and not covered by an exclusion pattern', async () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/offline`],
});
await utils.scan(admin.accessToken, library.id);
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
utils.renameImageFile(`${testAssetDir}/temp/offline/offline.png`, `${testAssetDir}/temp/offline.png`);
await utils.scan(admin.accessToken, library.id);
const offlineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(offlineAsset.isTrashed).toBe(true);
expect(offlineAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
expect(offlineAsset.isOffline).toBe(true);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
expect(assets.count).toBe(1);
}
utils.renameImageFile(`${testAssetDir}/temp/offline.png`, `${testAssetDir}/temp/offline/offline.png`);
await utils.scan(admin.accessToken, library.id);
const backOnlineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(backOnlineAsset.isTrashed).toBe(false);
expect(backOnlineAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
expect(backOnlineAsset.isOffline).toBe(false);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
expect(assets.count).toBe(1);
}
});
it('should not set an offline asset to online if its file exists, is not covered by an exclusion pattern, but is outside of all import paths', async () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/offline`],
});
await utils.scan(admin.accessToken, library.id);
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
utils.renameImageFile(`${testAssetDir}/temp/offline/offline.png`, `${testAssetDir}/temp/offline.png`);
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
expect(assets.count).toBe(1);
}
const offlineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(offlineAsset.isTrashed).toBe(true);
expect(offlineAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
expect(offlineAsset.isOffline).toBe(true);
utils.renameImageFile(`${testAssetDir}/temp/offline.png`, `${testAssetDir}/temp/offline/offline.png`);
utils.createDirectory(`${testAssetDir}/temp/another-path/`);
await utils.updateLibrary(admin.accessToken, library.id, {
importPaths: [`${testAssetDirInternal}/temp/another-path`],
});
await utils.scan(admin.accessToken, library.id);
const stillOfflineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(stillOfflineAsset.isTrashed).toBe(true);
expect(stillOfflineAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
expect(stillOfflineAsset.isOffline).toBe(true);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
expect(assets.count).toBe(1);
}
utils.removeDirectory(`${testAssetDir}/temp/another-path/`);
});
it('should not set an offline asset to online if its file exists, is in an import path, but is covered by an exclusion pattern', async () => {
utils.createImageFile(`${testAssetDir}/temp/offline/offline.png`);
const library = await utils.createLibrary(admin.accessToken, {
ownerId: admin.userId,
importPaths: [`${testAssetDirInternal}/temp/offline`],
});
await utils.scan(admin.accessToken, library.id);
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id });
utils.renameImageFile(`${testAssetDir}/temp/offline/offline.png`, `${testAssetDir}/temp/offline.png`);
await utils.scan(admin.accessToken, library.id);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
expect(assets.count).toBe(1);
}
const offlineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(offlineAsset.isTrashed).toBe(true);
expect(offlineAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
expect(offlineAsset.isOffline).toBe(true);
utils.renameImageFile(`${testAssetDir}/temp/offline.png`, `${testAssetDir}/temp/offline/offline.png`);
await utils.updateLibrary(admin.accessToken, library.id, { exclusionPatterns: ['**/offline/**'] });
await utils.scan(admin.accessToken, library.id);
const stillOfflineAsset = await utils.getAssetInfo(admin.accessToken, assets.items[0].id);
expect(stillOfflineAsset.isTrashed).toBe(true);
expect(stillOfflineAsset.originalPath).toBe(`${testAssetDirInternal}/temp/offline/offline.png`);
expect(stillOfflineAsset.isOffline).toBe(true);
{
const { assets } = await utils.searchAssets(admin.accessToken, { libraryId: library.id, withDeleted: true });
expect(assets.count).toBe(1);
}
});
});
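The offline cases removed above all run the same cycle: move or exclude the file, rescan, and assert the asset flipped to trashed/offline (or back again). A sketch of the assertion step only, assuming a hypothetical helper name and that isTrashed mirrors isOffline as it does in these tests:

// Hypothetical helper; checks the offline/trashed pair the removed tests assert after each rescan.
const expectAvailability = async (accessToken: string, assetId: string, offline: boolean) => {
  const asset = await utils.getAssetInfo(accessToken, assetId);
  expect(asset.isOffline).toBe(offline);
  expect(asset.isTrashed).toBe(offline);
};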
describe('POST /libraries/:id/validate', () => {
@@ -1165,7 +1089,8 @@ describe('/libraries', () => {
importPaths: [`${testAssetDirInternal}/temp`],
});
await utils.scan(admin.accessToken, library.id);
await scan(admin.accessToken, library.id);
await utils.waitForQueueFinish(admin.accessToken, 'library');
const { status, body } = await request(app)
.delete(`/libraries/${library.id}`)


@@ -93,6 +93,8 @@ describe('/memories', () => {
data: { year: 2021 },
createdAt: expect.any(String),
updatedAt: expect.any(String),
deletedAt: null,
seenAt: null,
isSaved: false,
memoryAt: expect.any(String),
ownerId: user.userId,
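The two added expectations imply the memory response now always carries deletedAt and seenAt, null until populated. A rough, partial shape inferred only from this assertion — not the SDK's actual MemoryResponseDto:

// Partial shape inferred from the assertion above; field types are assumptions.
interface MemoryResponseSketch {
  data: { year: number };
  createdAt: string;
  updatedAt: string;
  deletedAt: string | null;
  seenAt: string | null;
  isSaved: boolean;
  memoryAt: string;
  ownerId: string;
  // other fields (id, type, assets, ...) are not visible in this hunk
}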


@@ -13,8 +13,8 @@ import request from 'supertest';
import { beforeAll, describe, expect, it } from 'vitest';
const authServer = {
internal: 'http://auth-server:2286',
external: 'http://127.0.0.1:2286',
internal: 'http://auth-server:3000',
external: 'http://127.0.0.1:3000',
};
const mobileOverrideRedirectUri = 'https://photos.immich.app/oauth/mobile-redirect';


@@ -1,7 +1,7 @@
import { getPerson, LoginResponseDto, PersonResponseDto } from '@immich/sdk';
import { LoginResponseDto, PersonResponseDto } from '@immich/sdk';
import { uuidDto } from 'src/fixtures';
import { errorDto } from 'src/responses';
import { app, asBearerAuth, utils } from 'src/utils';
import { app, utils } from 'src/utils';
import request from 'supertest';
import { beforeAll, beforeEach, describe, expect, it } from 'vitest';
@@ -195,29 +195,12 @@ describe('/people', () => {
.send({
name: 'New Person',
birthDate: '1990-01-01',
color: '#333',
});
expect(status).toBe(201);
expect(body).toMatchObject({
id: expect.any(String),
name: 'New Person',
birthDate: '1990-01-01T00:00:00.000Z',
});
});
it('should create a favorite person', async () => {
const { status, body } = await request(app)
.post(`/people`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({
name: 'New Favorite Person',
isFavorite: true,
});
expect(status).toBe(201);
expect(body).toMatchObject({
id: expect.any(String),
name: 'New Favorite Person',
isFavorite: true,
birthDate: '1990-01-01',
});
});
});
@@ -233,7 +216,6 @@ describe('/people', () => {
{ key: 'name', type: 'string' },
{ key: 'featureFaceAssetId', type: 'string' },
{ key: 'isHidden', type: 'boolean value' },
{ key: 'isFavorite', type: 'boolean value' },
]) {
it(`should not allow null ${key}`, async () => {
const { status, body } = await request(app)
@@ -262,7 +244,7 @@ describe('/people', () => {
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ birthDate: '1990-01-01' });
expect(status).toBe(200);
expect(body).toMatchObject({ birthDate: '1990-01-01T00:00:00.000Z' });
expect(body).toMatchObject({ birthDate: '1990-01-01' });
});
it('should clear a date of birth', async () => {
@@ -273,42 +255,6 @@ describe('/people', () => {
expect(status).toBe(200);
expect(body).toMatchObject({ birthDate: null });
});
it('should set a color', async () => {
const { status, body } = await request(app)
.put(`/people/${visiblePerson.id}`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ color: '#555' });
expect(status).toBe(200);
expect(body).toMatchObject({ color: '#555' });
});
it('should clear a color', async () => {
const { status, body } = await request(app)
.put(`/people/${visiblePerson.id}`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ color: null });
expect(status).toBe(200);
expect(body.color).toBeUndefined();
});
it('should mark a person as favorite', async () => {
const person = await utils.createPerson(admin.accessToken, {
name: 'visible_person',
});
expect(person.isFavorite).toBe(false);
const { status, body } = await request(app)
.put(`/people/${person.id}`)
.set('Authorization', `Bearer ${admin.accessToken}`)
.send({ isFavorite: true });
expect(status).toBe(200);
expect(body).toMatchObject({ isFavorite: true });
const person2 = await getPerson({ id: person.id }, { headers: asBearerAuth(admin.accessToken) });
expect(person2).toMatchObject({ id: person.id, isFavorite: true });
});
});
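Pulling the create and update requests in this file together, these are the writable person fields the removed and changed cases touch. The interface below is assembled from those requests only; it is not the actual PersonCreateDto/PersonUpdateDto:

// Field set inferred from the requests in this spec; optionality and types are assumptions.
interface PersonUpsertSketch {
  name?: string;
  birthDate?: string | null; // '1990-01-01' vs '1990-01-01T00:00:00.000Z' is what this diff changes
  color?: string | null; // e.g. '#333', cleared with null (cases removed here)
  isFavorite?: boolean; // cases removed here
  isHidden?: boolean;
  featureFaceAssetId?: string;
}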
describe('POST /people/:id/merge', () => {

View File

@@ -1,10 +1,10 @@
import { AssetMediaResponseDto, AssetResponseDto, deleteAssets, LoginResponseDto, updateAsset } from '@immich/sdk';
import { AssetMediaResponseDto, LoginResponseDto, deleteAssets, updateAsset } from '@immich/sdk';
import { DateTime } from 'luxon';
import { readFile } from 'node:fs/promises';
import { join } from 'node:path';
import { Socket } from 'socket.io-client';
import { errorDto } from 'src/responses';
import { app, asBearerAuth, TEN_TIMES, testAssetDir, utils } from 'src/utils';
import { app, asBearerAuth, testAssetDir, utils } from 'src/utils';
import request from 'supertest';
import { afterAll, beforeAll, describe, expect, it } from 'vitest';
const today = DateTime.now();
@@ -462,55 +462,6 @@ describe('/search', () => {
});
});
describe('POST /search/random', () => {
beforeAll(async () => {
await Promise.all([
utils.createAsset(admin.accessToken),
utils.createAsset(admin.accessToken),
utils.createAsset(admin.accessToken),
utils.createAsset(admin.accessToken),
utils.createAsset(admin.accessToken),
utils.createAsset(admin.accessToken),
]);
await utils.waitForQueueFinish(admin.accessToken, 'thumbnailGeneration');
});
it('should require authentication', async () => {
const { status, body } = await request(app).post('/search/random').send({ size: 1 });
expect(status).toBe(401);
expect(body).toEqual(errorDto.unauthorized);
});
it.each(TEN_TIMES)('should return 1 random assets', async () => {
const { status, body } = await request(app)
.post('/search/random')
.send({ size: 1 })
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(status).toBe(200);
const assets: AssetResponseDto[] = body;
expect(assets.length).toBe(1);
expect(assets[0].ownerId).toBe(admin.userId);
});
it.each(TEN_TIMES)('should return 2 random assets', async () => {
const { status, body } = await request(app)
.post('/search/random')
.send({ size: 2 })
.set('Authorization', `Bearer ${admin.accessToken}`);
expect(status).toBe(200);
const assets: AssetResponseDto[] = body;
expect(assets.length).toBe(2);
expect(assets[0].ownerId).toBe(admin.userId);
expect(assets[1].ownerId).toBe(admin.userId);
});
});
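The removed random-search cases lean on a TEN_TIMES fixture imported from src/utils but never shown in this diff; presumably it is just a ten-element array so it.each repeats the probe. An assumed definition:

// Assumed definition of the fixture; the real export in src/utils may differ.
export const TEN_TIMES = Array.from({ length: 10 }, (_, i) => i + 1);

With that, it.each(TEN_TIMES)('should return 1 random assets', ...) runs the same request ten times, presumably to guard against a flaky random sample.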
describe('GET /search/explore', () => {
it('should require authentication', async () => {
const { status, body } = await request(app).get('/search/explore');


@@ -89,13 +89,13 @@ describe('/shared-links', () => {
await deleteUserAdmin({ id: user2.userId, userAdminDeleteDto: {} }, { headers: asBearerAuth(admin.accessToken) });
});
describe('GET /share/:key', () => {
describe('GET /share/${key}', () => {
it('should have correct asset count in meta tag for non-empty album', async () => {
const resp = await request(shareUrl).get(`/${linkWithMetadata.key}`);
expect(resp.status).toBe(200);
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(
`<meta name="description" content="${metadataAlbum.assets.length} shared photos &amp; videos" />`,
`<meta name="description" content="${metadataAlbum.assets.length} shared photos & videos" />`,
);
});
@@ -103,14 +103,14 @@ describe('/shared-links', () => {
const resp = await request(shareUrl).get(`/${linkWithAlbum.key}`);
expect(resp.status).toBe(200);
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(`<meta name="description" content="0 shared photos &amp; videos" />`);
expect(resp.text).toContain(`<meta name="description" content="0 shared photos & videos" />`);
});
it('should have correct asset count in meta tag for shared asset', async () => {
const resp = await request(shareUrl).get(`/${linkWithAssets.key}`);
expect(resp.status).toBe(200);
expect(resp.header['content-type']).toContain('text/html');
expect(resp.text).toContain(`<meta name="description" content="1 shared photos &amp; videos" />`);
expect(resp.text).toContain(`<meta name="description" content="1 shared photos & videos" />`);
});
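The only change in these meta-tag assertions is whether the ampersand in "shared photos & videos" is expected HTML-escaped. As an illustration of the difference (not the server's actual template code), escaping the attribute value is what yields the &amp; form:

// Minimal escaping sketch; the real rendering path is not part of this diff.
const escapeAttribute = (value: string) => value.replaceAll('&', '&amp;').replaceAll('"', '&quot;');
const assetCount = 1; // stand-in for metadataAlbum.assets.length
const description = `${assetCount} shared photos & videos`;
const metaTag = `<meta name="description" content="${escapeAttribute(description)}" />`;
// -> <meta name="description" content="1 shared photos &amp; videos" />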
it('should have fqdn og:image meta tag for shared asset', async () => {
@@ -139,10 +139,7 @@ describe('/shared-links', () => {
expect(body).toEqual(
expect.arrayContaining([
expect.objectContaining({ id: linkWithAlbum.id }),
expect.objectContaining({
id: linkWithAssets.id,
assets: expect.arrayContaining([expect.objectContaining({ id: asset1.id })]),
}),
expect.objectContaining({ id: linkWithAssets.id }),
expect.objectContaining({ id: linkWithPassword.id }),
expect.objectContaining({ id: linkWithMetadata.id }),
expect.objectContaining({ id: linkWithoutMetadata.id }),
@@ -150,30 +147,6 @@ describe('/shared-links', () => {
);
});
it('should filter on albumId', async () => {
const { status, body } = await request(app)
.get(`/shared-links?albumId=${album.id}`)
.set('Authorization', `Bearer ${user1.accessToken}`);
expect(status).toBe(200);
expect(body).toHaveLength(2);
expect(body).toEqual(
expect.arrayContaining([
expect.objectContaining({ id: linkWithAlbum.id }),
expect.objectContaining({ id: linkWithPassword.id }),
]),
);
});
it('should find 0 albums', async () => {
const { status, body } = await request(app)
.get(`/shared-links?albumId=${uuidDto.notFound}`)
.set('Authorization', `Bearer ${user1.accessToken}`);
expect(status).toBe(200);
expect(body).toHaveLength(0);
});
it('should not get shared links created by other users', async () => {
const { status, body } = await request(app)
.get('/shared-links')
@@ -197,7 +170,7 @@ describe('/shared-links', () => {
expect(status).toBe(200);
expect(body).toEqual(
expect.objectContaining({
album: expect.objectContaining({ id: album.id }),
album,
userId: user1.userId,
type: SharedLinkType.Album,
}),
@@ -235,7 +208,7 @@ describe('/shared-links', () => {
expect(status).toBe(200);
expect(body).toEqual(
expect.objectContaining({
album: expect.objectContaining({ id: album.id }),
album,
userId: user1.userId,
type: SharedLinkType.Album,
}),
@@ -289,7 +262,7 @@ describe('/shared-links', () => {
expect(status).toBe(200);
expect(body).toEqual(
expect.objectContaining({
album: expect.objectContaining({ id: album.id }),
album,
userId: user1.userId,
type: SharedLinkType.Album,
}),


@@ -119,84 +119,93 @@ describe('/stacks', () => {
const stacksAfter = await searchStacks({}, { headers: asBearerAuth(user1.accessToken) });
expect(stacksAfter.length).toBe(stacksBefore.length);
});
// it('should require a valid parent id', async () => {
// const { status, body } = await request(app)
// .put('/assets')
// .set('Authorization', `Bearer ${user1.accessToken}`)
// .send({ stackParentId: uuidDto.invalid, ids: [stackAssets[0].id] });
// expect(status).toBe(400);
// expect(body).toEqual(errorDto.badRequest(['stackParentId must be a UUID']));
// });
});
describe('GET /assets/:id', () => {
it('should include stack details for the primary asset', async () => {
const [asset1, asset2] = await Promise.all([
utils.createAsset(user1.accessToken),
utils.createAsset(user1.accessToken),
]);
// it('should require access to the parent', async () => {
// const { status, body } = await request(app)
// .put('/assets')
// .set('Authorization', `Bearer ${user1.accessToken}`)
// .send({ stackParentId: stackAssets[3].id, ids: [user1Assets[0].id] });
await utils.createStack(user1.accessToken, [asset1.id, asset2.id]);
// expect(status).toBe(400);
// expect(body).toEqual(errorDto.noPermission);
// });
const { status, body } = await request(app)
.get(`/assets/${asset1.id}`)
.set('Authorization', `Bearer ${user1.accessToken}`);
// it('should add stack children', async () => {
// const { status } = await request(app)
// .put('/assets')
// .set('Authorization', `Bearer ${stackUser.accessToken}`)
// .send({ stackParentId: stackAssets[0].id, ids: [stackAssets[3].id] });
expect(status).toBe(200);
expect(body).toEqual(
expect.objectContaining({
id: asset1.id,
stack: {
id: expect.any(String),
assetCount: 2,
primaryAssetId: asset1.id,
},
}),
);
});
// expect(status).toBe(204);
it('should include stack details for a non-primary asset', async () => {
const [asset1, asset2] = await Promise.all([
utils.createAsset(user1.accessToken),
utils.createAsset(user1.accessToken),
]);
// const asset = await getAssetInfo({ id: stackAssets[0].id }, { headers: asBearerAuth(stackUser.accessToken) });
// expect(asset.stack).not.toBeUndefined();
// expect(asset.stack).toEqual(expect.arrayContaining([expect.objectContaining({ id: stackAssets[3].id })]));
// });
await utils.createStack(user1.accessToken, [asset1.id, asset2.id]);
// it('should remove stack children', async () => {
// const { status } = await request(app)
// .put('/assets')
// .set('Authorization', `Bearer ${stackUser.accessToken}`)
// .send({ removeParent: true, ids: [stackAssets[1].id] });
const { status, body } = await request(app)
.get(`/assets/${asset2.id}`)
.set('Authorization', `Bearer ${user1.accessToken}`);
// expect(status).toBe(204);
expect(status).toBe(200);
expect(body).toEqual(
expect.objectContaining({
id: asset2.id,
stack: {
id: expect.any(String),
assetCount: 2,
primaryAssetId: asset1.id,
},
}),
);
});
});
// const asset = await getAssetInfo({ id: stackAssets[0].id }, { headers: asBearerAuth(stackUser.accessToken) });
// expect(asset.stack).not.toBeUndefined();
// expect(asset.stack).toEqual(
// expect.arrayContaining([
// expect.objectContaining({ id: stackAssets[2].id }),
// expect.objectContaining({ id: stackAssets[3].id }),
// ]),
// );
// });
describe('GET /stacks/:id', () => {
it('should include exifInfo in stack assets', async () => {
const [asset1, asset2] = await Promise.all([
utils.createAsset(user1.accessToken),
utils.createAsset(user1.accessToken),
]);
// it('should remove all stack children', async () => {
// const { status } = await request(app)
// .put('/assets')
// .set('Authorization', `Bearer ${stackUser.accessToken}`)
// .send({ removeParent: true, ids: [stackAssets[2].id, stackAssets[3].id] });
const stack = await utils.createStack(user1.accessToken, [asset1.id, asset2.id]);
// expect(status).toBe(204);
const { status, body } = await request(app)
.get(`/stacks/${stack.id}`)
.set('Authorization', `Bearer ${user1.accessToken}`);
// const asset = await getAssetInfo({ id: stackAssets[0].id }, { headers: asBearerAuth(stackUser.accessToken) });
// expect(asset.stack).toBeUndefined();
// });
expect(status).toBe(200);
expect(body).toEqual(
expect.objectContaining({
id: stack.id,
primaryAssetId: asset1.id,
assets: expect.arrayContaining([
expect.objectContaining({ id: asset1.id, exifInfo: expect.any(Object) }),
expect.objectContaining({ id: asset2.id, exifInfo: expect.any(Object) }),
]),
}),
);
});
});
// it('should merge stack children', async () => {
// // create stack after previous test removed stack children
// await updateAssets(
// { assetBulkUpdateDto: { stackParentId: stackAssets[0].id, ids: [stackAssets[1].id, stackAssets[2].id] } },
// { headers: asBearerAuth(stackUser.accessToken) },
// );
// const { status } = await request(app)
// .put('/assets')
// .set('Authorization', `Bearer ${stackUser.accessToken}`)
// .send({ stackParentId: stackAssets[3].id, ids: [stackAssets[0].id] });
// expect(status).toBe(204);
// const asset = await getAssetInfo({ id: stackAssets[3].id }, { headers: asBearerAuth(stackUser.accessToken) });
// expect(asset.stack).not.toBeUndefined();
// expect(asset.stack).toEqual(
// expect.arrayContaining([
// expect.objectContaining({ id: stackAssets[0].id }),
// expect.objectContaining({ id: stackAssets[1].id }),
// expect.objectContaining({ id: stackAssets[2].id }),
// ]),
// );
// });
});
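The new assertions in this spec pin down two response shapes: the asset endpoints expose a stack summary (id, assetCount, primaryAssetId), while GET /stacks/:id returns the full asset list with exifInfo attached. Two rough interfaces inferred from those expectations — not the SDK's actual types:

// Shapes inferred from the assertions above; fields beyond those asserted are omitted.
interface StackSummarySketch {
  id: string;
  assetCount: number;
  primaryAssetId: string;
}

interface StackDetailSketch {
  id: string;
  primaryAssetId: string;
  assets: Array<{ id: string; exifInfo: Record<string, unknown> }>;
}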
