feat(ml)!: customizable ML settings (#3891)

* consolidated endpoints, added live configuration

* added ml settings to server

* added settings dashboard

* updated deps, fixed typos

* simplified modelconfig

updated tests

* added ml setting accordion for admin page

updated tests

* merge `clipText` and `clipVision`

* added face distance setting

clarified setting

* add clip mode in request, dropdown for face models

* polished ml settings

updated descriptions

* update clip field on error

* removed unused import

* add description for image classification threshold

* pin safetensors for arm wheel

updated poetry lock

* moved dto

* set model type only in ml repository

* revert form-data package install

use fetch instead of axios

* added slotted description with link

updated facial recognition description

clarified effect of disabling tasks

* validation before model load

* removed unnecessary getConfig call

* added migration

* updated api

---------

Co-authored-by: Alex Tran <alex.tran1502@gmail.com>
Author: Mert
Date: 2023-08-29 09:58:00 -04:00
Committed by: GitHub
Parent: 22f5e05060
Commit: bcc36d14a1

56 changed files with 2324 additions and 655 deletions


@@ -60,16 +60,21 @@ class InferenceModel(ABC):
         self._load(**model_kwargs)
         self._loaded = True
 
-    def predict(self, inputs: Any) -> Any:
+    def predict(self, inputs: Any, **model_kwargs: Any) -> Any:
         if not self._loaded:
             print(f"Loading {self.model_type.value.replace('_', ' ')} model...")
             self.load()
+        if model_kwargs:
+            self.configure(**model_kwargs)
         return self._predict(inputs)
 
     @abstractmethod
     def _predict(self, inputs: Any) -> Any:
         ...
 
+    def configure(self, **model_kwargs: Any) -> None:
+        pass
+
     @abstractmethod
     def _download(self, **model_kwargs: Any) -> None:
         ...
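The effect of this hunk: callers can now pass per-request options through predict(), and they are applied via configure() before inference runs. A minimal sketch of the new calling convention, assuming the FaceRecognizer subclass from later in this commit (the import path, model name, and threshold value are illustrative):

    from pathlib import Path

    from app.models.facial_recognition import FaceRecognizer  # path assumed

    model = FaceRecognizer("buffalo_l")
    image_bytes = Path("photo.jpg").read_bytes()
    # min_score is not consumed by _predict(); predict() forwards it to
    # configure(), which adjusts the detector threshold before inference.
    faces = model.predict(image_bytes, min_score=0.6)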


@@ -1,5 +1,6 @@
 import os
 import zipfile
+from io import BytesIO
 from typing import Any, Literal
 
 import onnxruntime as ort
@@ -8,7 +9,7 @@ from clip_server.model.clip import BICUBIC, _convert_image_to_rgb
 from clip_server.model.clip_onnx import _MODELS, _S3_BUCKET_V2, CLIPOnnxModel, download_model
 from clip_server.model.pretrained_models import _VISUAL_MODEL_IMAGE_SIZE
 from clip_server.model.tokenization import Tokenizer
-from PIL.Image import Image
+from PIL import Image
 from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
 
 from ..schemas import ModelType
@@ -74,9 +75,12 @@ class CLIPEncoder(InferenceModel):
         image_size = _VISUAL_MODEL_IMAGE_SIZE[CLIPOnnxModel.get_model_name(self.model_name)]
         self.transform = _transform_pil_image(image_size)
 
-    def _predict(self, image_or_text: Image | str) -> list[float]:
+    def _predict(self, image_or_text: Image.Image | str) -> list[float]:
+        if isinstance(image_or_text, bytes):
+            image_or_text = Image.open(BytesIO(image_or_text))
+
         match image_or_text:
-            case Image():
+            case Image.Image():
                 if self.mode == "text":
                     raise TypeError("Cannot encode image as text-only model")
                 pixel_values = self.transform(image_or_text)
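The import swap from PIL.Image to PIL is what makes both halves of this hunk work: the module provides Image.open() for the new bytes branch, while the class stays reachable as Image.Image for the match statement's class pattern. A standalone illustration of the same pattern (the file name is a placeholder):

    from io import BytesIO
    from pathlib import Path

    from PIL import Image

    payload = Path("photo.jpg").read_bytes()
    image_or_text = Image.open(BytesIO(payload))

    match image_or_text:
        case Image.Image():  # class pattern: matches any PIL image instance
            print(image_or_text.size)
        case str():
            print("treat as text input")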


@@ -9,7 +9,6 @@ from insightface.model_zoo import ArcFaceONNX, RetinaFace
 from insightface.utils.face_align import norm_crop
 from insightface.utils.storage import BASE_REPO_URL, download_file
 
-from ..config import settings
 from ..schemas import ModelType
 from .base import InferenceModel
 
@@ -20,7 +19,7 @@ class FaceRecognizer(InferenceModel):
     def __init__(
         self,
         model_name: str,
-        min_score: float = settings.min_face_score,
+        min_score: float = 0.7,
         cache_dir: Path | str | None = None,
         **model_kwargs: Any,
     ) -> None:
@@ -69,11 +68,13 @@ class FaceRecognizer(InferenceModel):
         )
         self.rec_model.prepare(ctx_id=0)
 
-    def _predict(self, image: cv2.Mat) -> list[dict[str, Any]]:
+    def _predict(self, image: np.ndarray[int, np.dtype[Any]] | bytes) -> list[dict[str, Any]]:
+        if isinstance(image, bytes):
+            image = cv2.imdecode(np.frombuffer(image, np.uint8), cv2.IMREAD_COLOR)
         bboxes, kpss = self.det_model.detect(image)
         if bboxes.size == 0:
             return []
-        assert isinstance(kpss, np.ndarray)
+        assert isinstance(image, np.ndarray) and isinstance(kpss, np.ndarray)
 
         scores = bboxes[:, 4].tolist()
         bboxes = bboxes[:, :4].round().tolist()
@@ -102,3 +103,6 @@ class FaceRecognizer(InferenceModel):
     @property
     def cached(self) -> bool:
        return self.cache_dir.is_dir() and any(self.cache_dir.glob("*.onnx"))
+
+    def configure(self, **model_kwargs: Any) -> None:
+        self.det_model.det_thresh = model_kwargs.get("min_score", self.det_model.det_thresh)
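configure() relies on dict.get() falling back to the current value, so a request that omits min_score leaves the detector threshold untouched while an explicit value overrides it. A self-contained sketch of the idiom (the Detector class is a hypothetical stand-in; the real attribute is RetinaFace's det_thresh):

    class Detector:  # stand-in for the RetinaFace det_model
        det_thresh = 0.7

    det_model = Detector()

    def configure(**model_kwargs):
        det_model.det_thresh = model_kwargs.get("min_score", det_model.det_thresh)

    configure()                # no key: threshold stays at 0.7
    configure(min_score=0.55)  # explicit override: threshold becomes 0.55
    print(det_model.det_thresh)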


@@ -1,13 +1,13 @@
+from io import BytesIO
 from pathlib import Path
 from typing import Any
 
 from huggingface_hub import snapshot_download
 from optimum.onnxruntime import ORTModelForImageClassification
 from optimum.pipelines import pipeline
-from PIL.Image import Image
+from PIL import Image
 from transformers import AutoImageProcessor
 
-from ..config import settings
 from ..schemas import ModelType
 from .base import InferenceModel
 
@@ -18,7 +18,7 @@ class ImageClassifier(InferenceModel):
     def __init__(
         self,
         model_name: str,
-        min_score: float = settings.min_tag_score,
+        min_score: float = 0.9,
         cache_dir: Path | str | None = None,
         **model_kwargs: Any,
     ) -> None:
@@ -56,8 +56,13 @@ class ImageClassifier(InferenceModel):
             feature_extractor=processor,
         )
 
-    def _predict(self, image: Image) -> list[str]:
+    def _predict(self, image: Image.Image | bytes) -> list[str]:
+        if isinstance(image, bytes):
+            image = Image.open(BytesIO(image))
         predictions: list[dict[str, Any]] = self.model(image)  # type: ignore
         tags = [tag for pred in predictions for tag in pred["label"].split(", ") if pred["score"] >= self.min_score]
 
         return tags
+
+    def configure(self, **model_kwargs: Any) -> None:
+        self.min_score = model_kwargs.get("min_score", self.min_score)
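Because the pipeline emits comma-separated labels, the threshold filters whole predictions before their labels are split into individual tags. A worked example of the comprehension above, with made-up scores to show how the (now per-request) min_score applies:

    predictions = [
        {"label": "tabby, tabby cat", "score": 0.93},
        {"label": "Egyptian cat", "score": 0.41},
    ]
    min_score = 0.9  # the new default; overridable via configure()
    tags = [tag for pred in predictions for tag in pred["label"].split(", ") if pred["score"] >= min_score]
    print(tags)  # ['tabby', 'tabby cat']; the 0.41 prediction is dropped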