feat(ml)!: cuda and openvino acceleration (#5619)
* cuda and openvino ep, refactor, update dockerfile
* updated workflow
* typing fixes
* added tests
* updated ml test gh action
* updated README
* updated docker-compose
* added compute to hwaccel.yml
* updated gh matrix
* updated gh matrix
* updated gh matrix
* updated gh matrix
* updated gh matrix
* give up
* remove cuda/arm64 build
* add hwaccel image tags to docker-compose
* remove unnecessary quotes
* add suffix to git tag
* fixed kwargs in base model
* armnn ld_library_path
* update pyproject.toml
* add armnn workflow
* formatting
* consolidate hwaccel files, update docker compose
* update hw transcoding docs
* add ml hwaccel docs
* update dev and prod docker-compose
* added armnn prerequisite docs
* support 3.10
* updated docker-compose comments
* formatting
* test coverage
* don't set arena extend strategy for openvino
* working openvino
* formatting
* fix dockerfile
* added type annotation
* add wsl configuration for openvino
* updated lock file
* copy python3
* comment out extends section
* fix platforms
* simplify workflow suffix tagging
* simplify aio transcoding doc
* update docs and workflow for `hwaccel.yml` change
* revert docs
@@ -24,8 +24,8 @@ class Settings(BaseSettings):
     workers: int = 1
     test_full: bool = False
     request_threads: int = os.cpu_count() or 4
-    model_inter_op_threads: int = 1
-    model_intra_op_threads: int = 2
+    model_inter_op_threads: int = 0
+    model_intra_op_threads: int = 0
     ann: bool = True

     class Config:

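The thread settings above drop their CPU-tuned defaults (1 and 2) in favor of 0, which the refactored base model treats as "unset". A minimal sketch of the resulting resolution logic, with an illustrative helper name (the real logic lives in sess_options_default further down):

def effective_inter_op_threads(configured: int, providers: list[str]) -> int:
    # Illustrative only: mirrors the sess_options_default behavior in this commit.
    if configured > 0:
        return configured  # an explicit user setting always wins
    if providers == ["CPUExecutionProvider"]:
        return 1  # CPU-only keeps the old tuned default
    return 0  # leave unset so onnxruntime can tune for GPU providers
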
@@ -93,3 +93,15 @@ def clip_tokenizer_cfg() -> dict[str, Any]:
         "tokenizer_class": "CLIPTokenizer",
         "unk_token": "<|endoftext|>",
     }
+
+
+@pytest.fixture(scope="function")
+def providers(request: pytest.FixtureRequest) -> Iterator[dict[str, Any]]:
+    marker = request.node.get_closest_marker("providers")
+    if marker is None:
+        raise ValueError("Missing marker 'providers'")
+
+    providers = marker.args[0]
+    with mock.patch("app.models.base.ort.get_available_providers") as mocked:
+        mocked.return_value = providers
+        yield providers

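A usage sketch of the new fixture (the test name here is illustrative): a test opts in through the `providers` marker, and the fixture patches ort.get_available_providers to report exactly those providers before the test body runs.

import pytest


@pytest.mark.providers(["CPUExecutionProvider"])
def test_reports_only_cpu(providers: list[str]) -> None:
    # get_available_providers is already mocked, so any model constructed
    # here will see only the providers named in the marker
    assert providers == ["CPUExecutionProvider"]

The TestBase cases added to the test module below use exactly this pattern.
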
@@ -131,7 +131,7 @@ async def load(model: InferenceModel) -> InferenceModel:
         await loop.run_in_executor(thread_pool, _load)
         return model
     except (OSError, InvalidProtobuf, BadZipFile, NoSuchFile):
-        log.warn(
+        log.warning(
             (
                 f"Failed to load {model.model_type.replace('_', ' ')} model '{model.model_name}'."
                 "Clearing cache and retrying."

@@ -11,6 +11,7 @@ from huggingface_hub import snapshot_download
 from typing_extensions import Buffer

 import ann.ann
+from app.models.constants import SUPPORTED_PROVIDERS

 from ..config import get_cache_dir, get_hf_model_name, log, settings
 from ..schemas import ModelType

@@ -24,36 +25,17 @@ class InferenceModel(ABC):
         self,
         model_name: str,
         cache_dir: Path | str | None = None,
-        inter_op_num_threads: int = settings.model_inter_op_threads,
-        intra_op_num_threads: int = settings.model_intra_op_threads,
+        providers: list[str] | None = None,
+        provider_options: list[dict[str, Any]] | None = None,
+        sess_options: ort.SessionOptions | None = None,
         **model_kwargs: Any,
     ) -> None:
-        self.model_name = model_name
         self.loaded = False
-        self._cache_dir = Path(cache_dir) if cache_dir is not None else None
-        self.providers = model_kwargs.pop("providers", ["CPUExecutionProvider"])
-        # don't pre-allocate more memory than needed
-        self.provider_options = model_kwargs.pop(
-            "provider_options", [{"arena_extend_strategy": "kSameAsRequested"}] * len(self.providers)
-        )
-        log.debug(
-            (
-                f"Setting '{self.model_name}' execution providers to {self.providers} "
-                "in descending order of preference"
-            ),
-        )
-        log.debug(f"Setting execution provider options to {self.provider_options}")
-        self.sess_options = PicklableSessionOptions()
-        # avoid thread contention between models
-        if inter_op_num_threads > 1:
-            self.sess_options.execution_mode = ort.ExecutionMode.ORT_PARALLEL
-
-        log.debug(f"Setting execution_mode to {self.sess_options.execution_mode.name}")
-        log.debug(f"Setting inter_op_num_threads to {inter_op_num_threads}")
-        log.debug(f"Setting intra_op_num_threads to {intra_op_num_threads}")
-        self.sess_options.inter_op_num_threads = inter_op_num_threads
-        self.sess_options.intra_op_num_threads = intra_op_num_threads
-        self.sess_options.enable_cpu_mem_arena = False
+        self.model_name = model_name
+        self.cache_dir = Path(cache_dir) if cache_dir is not None else self.cache_dir_default
+        self.providers = providers if providers is not None else self.providers_default
+        self.provider_options = provider_options if provider_options is not None else self.provider_options_default
+        self.sess_options = sess_options if sess_options is not None else self.sess_options_default

     def download(self) -> None:
         if not self.cached:

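The constructor now takes providers, provider options, and session options as explicit keyword arguments instead of fishing them out of **model_kwargs, and anything left as None falls back to the matching *_default property defined further down. A call-site sketch mirroring the new tests:

from app.models.clip import OpenCLIPEncoder

# Omit any of these kwargs to get the corresponding *_default fallback.
encoder = OpenCLIPEncoder(
    "ViT-B-32__openai",
    providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
    provider_options=[{"arena_extend_strategy": "kSameAsRequested"}, {}],
)
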
@@ -95,33 +77,9 @@ class InferenceModel(ABC):
     def _load(self) -> None:
         ...

-    @property
-    def model_type(self) -> ModelType:
-        return self._model_type
-
-    @property
-    def cache_dir(self) -> Path:
-        return self._cache_dir if self._cache_dir is not None else get_cache_dir(self.model_name, self.model_type)
-
-    @cache_dir.setter
-    def cache_dir(self, cache_dir: Path) -> None:
-        self._cache_dir = cache_dir
-
-    @property
-    def cached(self) -> bool:
-        return self.cache_dir.exists() and any(self.cache_dir.iterdir())
-
-    @classmethod
-    def from_model_type(cls, model_type: ModelType, model_name: str, **model_kwargs: Any) -> InferenceModel:
-        subclasses = {subclass._model_type: subclass for subclass in cls.__subclasses__()}
-        if model_type not in subclasses:
-            raise ValueError(f"Unsupported model type: {model_type}")
-
-        return subclasses[model_type](model_name, **model_kwargs)
-
     def clear_cache(self) -> None:
         if not self.cache_dir.exists():
-            log.warn(
+            log.warning(
                 f"Attempted to clear cache for model '{self.model_name}', but cache directory does not exist",
             )
             return

@@ -132,7 +90,7 @@ class InferenceModel(ABC):
             log.info(f"Cleared cache directory for model '{self.model_name}'.")
             rmtree(self.cache_dir)
         else:
-            log.warn(
+            log.warning(
                 (
                     f"Encountered file instead of directory at cache path "
                     f"for '{self.model_name}'. Removing file and replacing with a directory."

@@ -156,6 +114,107 @@ class InferenceModel(ABC):
                 raise ValueError(f"the file model_path='{model_path}' does not exist")
         return session

+    @property
+    def model_type(self) -> ModelType:
+        return self._model_type
+
+    @property
+    def cache_dir(self) -> Path:
+        return self._cache_dir
+
+    @cache_dir.setter
+    def cache_dir(self, cache_dir: Path) -> None:
+        self._cache_dir = cache_dir
+
+    @property
+    def cache_dir_default(self) -> Path:
+        return get_cache_dir(self.model_name, self.model_type)
+
+    @property
+    def cached(self) -> bool:
+        return self.cache_dir.exists() and any(self.cache_dir.iterdir())
+
+    @property
+    def providers(self) -> list[str]:
+        return self._providers
+
+    @providers.setter
+    def providers(self, providers: list[str]) -> None:
+        log.debug(
+            (f"Setting '{self.model_name}' execution providers to {providers}, " "in descending order of preference"),
+        )
+        self._providers = providers
+
+    @property
+    def providers_default(self) -> list[str]:
+        available_providers = set(ort.get_available_providers())
+        log.debug(f"Available ORT providers: {available_providers}")
+        return [provider for provider in SUPPORTED_PROVIDERS if provider in available_providers]
+
+    @property
+    def provider_options(self) -> list[dict[str, Any]]:
+        return self._provider_options
+
+    @provider_options.setter
+    def provider_options(self, provider_options: list[dict[str, Any]]) -> None:
+        log.debug(f"Setting execution provider options to {provider_options}")
+        self._provider_options = provider_options
+
+    @property
+    def provider_options_default(self) -> list[dict[str, Any]]:
+        options = []
+        for provider in self.providers:
+            match provider:
+                case "CPUExecutionProvider" | "CUDAExecutionProvider":
+                    option = {"arena_extend_strategy": "kSameAsRequested"}
+                case "OpenVINOExecutionProvider":
+                    try:
+                        device_ids: list[str] = ort.capi._pybind_state.get_available_openvino_device_ids()
+                        log.debug(f"Available OpenVINO devices: {device_ids}")
+                        gpu_devices = [device_id for device_id in device_ids if device_id.startswith("GPU")]
+                        option = {"device_id": gpu_devices[0]} if gpu_devices else {}
+                    except AttributeError as e:
+                        log.warning("Failed to get OpenVINO device IDs. Using default options.")
+                        log.error(e)
+                        option = {}
+                case _:
+                    option = {}
+            options.append(option)
+        return options
+
+    @property
+    def sess_options(self) -> ort.SessionOptions:
+        return self._sess_options
+
+    @sess_options.setter
+    def sess_options(self, sess_options: ort.SessionOptions) -> None:
+        log.debug(f"Setting execution_mode to {sess_options.execution_mode.name}")
+        log.debug(f"Setting inter_op_num_threads to {sess_options.inter_op_num_threads}")
+        log.debug(f"Setting intra_op_num_threads to {sess_options.intra_op_num_threads}")
+        self._sess_options = sess_options
+
+    @property
+    def sess_options_default(self) -> ort.SessionOptions:
+        sess_options = PicklableSessionOptions()
+        sess_options.enable_cpu_mem_arena = False
+
+        # avoid thread contention between models
+        if settings.model_inter_op_threads > 0:
+            sess_options.inter_op_num_threads = settings.model_inter_op_threads
+        # these defaults work well for CPU, but bottleneck GPU
+        elif settings.model_inter_op_threads == 0 and self.providers == ["CPUExecutionProvider"]:
+            sess_options.inter_op_num_threads = 1
+
+        if settings.model_intra_op_threads > 0:
+            sess_options.intra_op_num_threads = settings.model_intra_op_threads
+        elif settings.model_intra_op_threads == 0 and self.providers == ["CPUExecutionProvider"]:
+            sess_options.intra_op_num_threads = 2
+
+        if sess_options.inter_op_num_threads > 1:
+            sess_options.execution_mode = ort.ExecutionMode.ORT_PARALLEL
+
+        return sess_options
+

 # HF deep copies configs, so we need to make session options picklable
 class PicklableSessionOptions(ort.SessionOptions):  # type: ignore[misc]

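The hunk ends at the class declaration, so the pickling body itself is not shown. A minimal sketch of one way to satisfy the comment above, assuming a plain attribute round-trip (the method bodies here are an assumption, not the commit's code):

import pickle

import onnxruntime as ort


class PicklableSessionOptions(ort.SessionOptions):  # type: ignore[misc]
    def __getstate__(self) -> bytes:
        # snapshot plain data attributes (thread counts, execution_mode, ...)
        attrs = [(a, getattr(self, a)) for a in dir(self) if not a.startswith("_") and not callable(getattr(self, a))]
        return pickle.dumps(attrs)

    def __setstate__(self, state: bytes) -> None:
        self.__init__()  # type: ignore[misc]
        for attr, value in pickle.loads(state):
            setattr(self, attr, value)

This matters because ort.SessionOptions is a pybind object that cannot be pickled directly, while Hugging Face helpers deep-copy any config they are handed.
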
@@ -23,7 +23,7 @@ class BaseCLIPEncoder(InferenceModel):
     def __init__(
         self,
         model_name: str,
-        cache_dir: str | None = None,
+        cache_dir: Path | str | None = None,
         mode: Literal["text", "vision"] | None = None,
         **model_kwargs: Any,
     ) -> None:

@@ -136,7 +136,7 @@ class OpenCLIPEncoder(BaseCLIPEncoder):
     def __init__(
         self,
         model_name: str,
-        cache_dir: str | None = None,
+        cache_dir: Path | str | None = None,
        mode: Literal["text", "vision"] | None = None,
        **model_kwargs: Any,
    ) -> None:

@@ -51,6 +51,13 @@ _INSIGHTFACE_MODELS = {
 }


+SUPPORTED_PROVIDERS = [
+    "CUDAExecutionProvider",
+    "OpenVINOExecutionProvider",
+    "CPUExecutionProvider",
+]
+
+
 def is_openclip(model_name: str) -> bool:
     return clean_name(model_name) in _OPENCLIP_MODELS

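SUPPORTED_PROVIDERS is ordered by preference, which is what lets providers_default in base.py both filter and rank whatever onnxruntime reports. A sketch with a hypothetical availability set:

import onnxruntime as ort

SUPPORTED_PROVIDERS = ["CUDAExecutionProvider", "OpenVINOExecutionProvider", "CPUExecutionProvider"]

# Suppose ort reports TensorRT, CUDA, and CPU on this machine.
available = set(ort.get_available_providers())
providers = [p for p in SUPPORTED_PROVIDERS if p in available]
# Unsupported entries (e.g. TensorrtExecutionProvider) are dropped, and CUDA
# sorts ahead of CPU regardless of the order ort returned them in.
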
@@ -1,4 +1,4 @@
-from enum import StrEnum
+from enum import Enum
 from typing import Any, Protocol, TypedDict, TypeGuard

 import numpy as np

@@ -21,7 +21,7 @@ class BoundingBox(TypedDict):
     y2: int


-class ModelType(StrEnum):
+class ModelType(str, Enum):
     CLIP = "clip"
     FACIAL_RECOGNITION = "facial-recognition"

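The StrEnum import is the reason for this change: enum.StrEnum only exists on Python 3.11+, and the commit message's "support 3.10" entry refers to replacing it with a str/Enum mixin, which keeps the semantics the code relies on:

from enum import Enum


class ModelType(str, Enum):
    CLIP = "clip"


# Equality against plain strings and .value access behave the same as
# with the 3.11 StrEnum this replaces.
assert ModelType.CLIP == "clip"
assert ModelType.CLIP.value == "clip"
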
@@ -7,12 +7,13 @@ from unittest import mock

 import cv2
 import numpy as np
+import onnxruntime as ort
 import pytest
 from fastapi.testclient import TestClient
 from PIL import Image
 from pytest_mock import MockerFixture

-from .config import settings
+from .config import log, settings
 from .models.base import InferenceModel, PicklableSessionOptions
 from .models.cache import ModelCache
 from .models.clip import OpenCLIPEncoder

@@ -20,6 +21,221 @@ from .models.facial_recognition import FaceRecognizer
 from .schemas import ModelType


+class TestBase:
+    CPU_EP = ["CPUExecutionProvider"]
+    CUDA_EP = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+    OV_EP = ["OpenVINOExecutionProvider", "CPUExecutionProvider"]
+    CUDA_EP_OUT_OF_ORDER = ["CPUExecutionProvider", "CUDAExecutionProvider"]
+    TRT_EP = ["TensorrtExecutionProvider", "CUDAExecutionProvider", "CPUExecutionProvider"]
+
+    @pytest.mark.providers(CPU_EP)
+    def test_sets_cpu_provider(self, providers: list[str]) -> None:
+        encoder = OpenCLIPEncoder("ViT-B-32__openai")
+
+        assert encoder.providers == self.CPU_EP
+
+    @pytest.mark.providers(CUDA_EP)
+    def test_sets_cuda_provider_if_available(self, providers: list[str]) -> None:
+        encoder = OpenCLIPEncoder("ViT-B-32__openai")
+
+        assert encoder.providers == self.CUDA_EP
+
+    @pytest.mark.providers(OV_EP)
+    def test_sets_openvino_provider_if_available(self, providers: list[str]) -> None:
+        encoder = OpenCLIPEncoder("ViT-B-32__openai")
+
+        assert encoder.providers == self.OV_EP
+
+    @pytest.mark.providers(CUDA_EP_OUT_OF_ORDER)
+    def test_sets_providers_in_correct_order(self, providers: list[str]) -> None:
+        encoder = OpenCLIPEncoder("ViT-B-32__openai")
+
+        assert encoder.providers == self.CUDA_EP
+
+    @pytest.mark.providers(TRT_EP)
+    def test_ignores_unsupported_providers(self, providers: list[str]) -> None:
+        encoder = OpenCLIPEncoder("ViT-B-32__openai")
+
+        assert encoder.providers == self.CUDA_EP
+
+    def test_sets_provider_kwarg(self) -> None:
+        providers = ["CUDAExecutionProvider"]
+        encoder = OpenCLIPEncoder("ViT-B-32__openai", providers=providers)
+
+        assert encoder.providers == providers
+
+    def test_sets_default_provider_options(self) -> None:
+        encoder = OpenCLIPEncoder("ViT-B-32__openai", providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"])
+
+        assert encoder.provider_options == [
+            {},
+            {"arena_extend_strategy": "kSameAsRequested"},
+        ]
+
+    def test_sets_provider_options_kwarg(self) -> None:
+        encoder = OpenCLIPEncoder(
+            "ViT-B-32__openai",
+            providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"],
+            provider_options=[],
+        )
+
+        assert encoder.provider_options == []
+
+    def test_sets_default_sess_options(self) -> None:
+        encoder = OpenCLIPEncoder("ViT-B-32__openai")
+
+        assert encoder.sess_options.execution_mode == ort.ExecutionMode.ORT_SEQUENTIAL
+        assert encoder.sess_options.inter_op_num_threads == 1
+        assert encoder.sess_options.intra_op_num_threads == 2
+        assert encoder.sess_options.enable_cpu_mem_arena is False
+
+    def test_sets_default_sess_options_does_not_set_threads_if_non_cpu_and_default_threads(self) -> None:
+        encoder = OpenCLIPEncoder("ViT-B-32__openai", providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
+
+        assert encoder.sess_options.inter_op_num_threads == 0
+        assert encoder.sess_options.intra_op_num_threads == 0
+
+    def test_sets_default_sess_options_sets_threads_if_non_cpu_and_set_threads(self, mocker: MockerFixture) -> None:
+        mock_settings = mocker.patch("app.models.base.settings", autospec=True)
+        mock_settings.model_inter_op_threads = 2
+        mock_settings.model_intra_op_threads = 4
+
+        encoder = OpenCLIPEncoder("ViT-B-32__openai", providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
+
+        assert encoder.sess_options.inter_op_num_threads == 2
+        assert encoder.sess_options.intra_op_num_threads == 4
+
+    def test_sets_sess_options_kwarg(self) -> None:
+        sess_options = ort.SessionOptions()
+        encoder = OpenCLIPEncoder(
+            "ViT-B-32__openai",
+            providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"],
+            provider_options=[],
+            sess_options=sess_options,
+        )
+
+        assert sess_options is encoder.sess_options
+
+    def test_sets_default_cache_dir(self) -> None:
+        encoder = OpenCLIPEncoder("ViT-B-32__openai")
+
+        assert encoder.cache_dir == Path("/cache/clip/ViT-B-32__openai")
+
+    def test_sets_cache_dir_kwarg(self) -> None:
+        cache_dir = Path("/test_cache")
+        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=cache_dir)
+
+        assert encoder.cache_dir == cache_dir
+
+    def test_casts_cache_dir_string_to_path(self) -> None:
+        cache_dir = "/test_cache"
+        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=cache_dir)
+
+        assert encoder.cache_dir == Path(cache_dir)
+
+    def test_clear_cache(self, mocker: MockerFixture) -> None:
+        mock_rmtree = mocker.patch("app.models.base.rmtree", autospec=True)
+        mock_rmtree.avoids_symlink_attacks = True
+        mock_cache_dir = mocker.Mock()
+        mock_cache_dir.exists.return_value = True
+        mock_cache_dir.is_dir.return_value = True
+        mocker.patch("app.models.base.Path", return_value=mock_cache_dir)
+        info = mocker.spy(log, "info")
+
+        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=mock_cache_dir)
+        encoder.clear_cache()
+
+        mock_rmtree.assert_called_once_with(encoder.cache_dir)
+        info.assert_called_once()
+
+    def test_clear_cache_warns_if_path_does_not_exist(self, mocker: MockerFixture) -> None:
+        mock_rmtree = mocker.patch("app.models.base.rmtree", autospec=True)
+        mock_rmtree.avoids_symlink_attacks = True
+        mock_cache_dir = mocker.Mock()
+        mock_cache_dir.exists.return_value = False
+        mock_cache_dir.is_dir.return_value = True
+        mocker.patch("app.models.base.Path", return_value=mock_cache_dir)
+        warning = mocker.spy(log, "warning")
+
+        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=mock_cache_dir)
+        encoder.clear_cache()
+
+        mock_rmtree.assert_not_called()
+        warning.assert_called_once()
+
+    def test_clear_cache_raises_exception_if_vulnerable_to_symlink_attack(self, mocker: MockerFixture) -> None:
+        mock_rmtree = mocker.patch("app.models.base.rmtree", autospec=True)
+        mock_rmtree.avoids_symlink_attacks = False
+        mock_cache_dir = mocker.Mock()
+        mock_cache_dir.exists.return_value = True
+        mock_cache_dir.is_dir.return_value = True
+        mocker.patch("app.models.base.Path", return_value=mock_cache_dir)
+
+        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=mock_cache_dir)
+        with pytest.raises(RuntimeError):
+            encoder.clear_cache()
+
+        mock_rmtree.assert_not_called()
+
+    def test_clear_cache_replaces_file_with_dir_if_path_is_file(self, mocker: MockerFixture) -> None:
+        mock_rmtree = mocker.patch("app.models.base.rmtree", autospec=True)
+        mock_rmtree.avoids_symlink_attacks = True
+        mock_cache_dir = mocker.Mock()
+        mock_cache_dir.exists.return_value = True
+        mock_cache_dir.is_dir.return_value = False
+        mocker.patch("app.models.base.Path", return_value=mock_cache_dir)
+        warning = mocker.spy(log, "warning")
+
+        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=mock_cache_dir)
+        encoder.clear_cache()
+
+        mock_rmtree.assert_not_called()
+        mock_cache_dir.unlink.assert_called_once()
+        mock_cache_dir.mkdir.assert_called_once()
+        warning.assert_called_once()
+
+    def test_make_session_return_ann_if_available(self, mocker: MockerFixture) -> None:
+        mock_cache_dir = mocker.Mock()
+        mock_cache_dir.is_file.return_value = True
+        mock_cache_dir.with_suffix.return_value = mock_cache_dir
+        mocker.patch.object(settings, "ann", True)
+        mocker.patch("ann.ann.is_available", True)
+        mock_session = mocker.patch("app.models.base.AnnSession")
+
+        encoder = OpenCLIPEncoder("ViT-B-32__openai")
+        encoder._make_session(mock_cache_dir)
+
+        mock_session.assert_called_once()
+
+    def test_make_session_return_ort_if_available_and_ann_is_not(self, mocker: MockerFixture) -> None:
+        mock_cache_dir = mocker.Mock()
+        mock_cache_dir.is_file.return_value = True
+        mock_cache_dir.with_suffix.return_value = mock_cache_dir
+        mocker.patch.object(settings, "ann", False)
+        mocker.patch("ann.ann.is_available", False)
+        mock_session = mocker.patch("app.models.base.ort.InferenceSession")
+
+        encoder = OpenCLIPEncoder("ViT-B-32__openai")
+        encoder._make_session(mock_cache_dir)
+
+        mock_session.assert_called_once()
+
+    def test_make_session_raises_exception_if_path_does_not_exist(self, mocker: MockerFixture) -> None:
+        mock_cache_dir = mocker.Mock()
+        mock_cache_dir.is_file.return_value = False
+        mock_cache_dir.with_suffix.return_value = mock_cache_dir
+        mocker.patch("ann.ann.is_available", False)
+        mock_ann = mocker.patch("app.models.base.AnnSession")
+        mock_ort = mocker.patch("app.models.base.ort.InferenceSession")
+
+        encoder = OpenCLIPEncoder("ViT-B-32__openai")
+        with pytest.raises(ValueError):
+            encoder._make_session(mock_cache_dir)
+
+        mock_ann.assert_not_called()
+        mock_ort.assert_not_called()
+
+
 class TestCLIP:
     embedding = np.random.rand(512).astype(np.float32)
     cache_dir = Path("test_cache")

@@ -41,7 +257,7 @@ class TestCLIP:
         mocked.run.return_value = [[self.embedding]]
         mocker.patch("app.models.clip.Tokenizer.from_file", autospec=True)

-        clip_encoder = OpenCLIPEncoder("ViT-B-32::openai", cache_dir="test_cache", mode="vision")
+        clip_encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir="test_cache", mode="vision")
         embedding = clip_encoder.predict(pil_image)

         assert clip_encoder.mode == "vision"

@@ -66,7 +282,7 @@ class TestCLIP:
         mocked.run.return_value = [[self.embedding]]
         mocker.patch("app.models.clip.Tokenizer.from_file", autospec=True)

-        clip_encoder = OpenCLIPEncoder("ViT-B-32::openai", cache_dir="test_cache", mode="text")
+        clip_encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir="test_cache", mode="text")
         embedding = clip_encoder.predict("test search query")

         assert clip_encoder.mode == "text"

@@ -166,7 +382,7 @@ class TestEndpoints:
         pil_image.save(byte_image, format="jpeg")
         response = deployed_app.post(
             "http://localhost:3003/predict",
-            data={"modelName": "ViT-B-32::openai", "modelType": "clip", "options": json.dumps({"mode": "vision"})},
+            data={"modelName": "ViT-B-32__openai", "modelType": "clip", "options": json.dumps({"mode": "vision"})},
             files={"image": byte_image.getvalue()},
         )
         assert response.status_code == 200

@@ -176,7 +392,7 @@ class TestEndpoints:
         response = deployed_app.post(
             "http://localhost:3003/predict",
             data={
-                "modelName": "ViT-B-32::openai",
+                "modelName": "ViT-B-32__openai",
                 "modelType": "clip",
                 "text": "test search query",
                 "options": json.dumps({"mode": "text"}),