Mirror of https://github.com/KevinMidboe/immich.git (synced 2025-10-29 17:40:28 +00:00)
chore(ml): added testing and github workflow (#2969)
* added testing
* github action for python, made mypy happy
* formatted with black
* minor fixes and styling
* test model cache
* cache test dependencies
* narrowed model cache tests
* moved endpoint tests to their own class
* cleaned up fixtures
* formatting
* removed unused dep

@@ -18,6 +18,7 @@ class Settings(BaseSettings):
     port: int = 3003
     workers: int = 1
     min_face_score: float = 0.7
+    test_full: bool = False
 
     class Config(BaseSettings.Config):
         env_prefix = "MACHINE_LEARNING_"
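
The new test_full flag is read from the environment through the MACHINE_LEARNING_ prefix shown above and gates the slower endpoint tests added in test_main.py below. As a rough illustration only (this runner is not part of the commit), the full suite could be invoked with the flag enabled like so, assuming the tests live under machine-learning/app as the file headers in this diff indicate:

# Illustrative runner, not part of this commit: enables the full endpoint tests
# by setting the env var that pydantic maps to Settings.test_full via env_prefix.
import os
import sys

import pytest

if __name__ == "__main__":
    os.environ.setdefault("MACHINE_LEARNING_TEST_FULL", "true")
    sys.exit(pytest.main(["machine-learning/app", "-v"]))
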

machine-learning/app/conftest.py (new file, 119 lines)
@@ -0,0 +1,119 @@
from types import SimpleNamespace
from typing import Any, Iterator, TypeAlias
from unittest import mock

import numpy as np
import pytest
from fastapi.testclient import TestClient
from PIL import Image

from .main import app, init_state

ndarray: TypeAlias = np.ndarray[int, np.dtype[np.float32]]


@pytest.fixture
def pil_image() -> Image.Image:
    return Image.new("RGB", (600, 800))


@pytest.fixture
def cv_image(pil_image: Image.Image) -> ndarray:
    return np.asarray(pil_image)[:, :, ::-1]  # PIL uses RGB while cv2 uses BGR


@pytest.fixture
def mock_classifier_pipeline() -> Iterator[mock.Mock]:
    with mock.patch("app.models.image_classification.pipeline") as model:
        classifier_preds = [
            {"label": "that's an image alright", "score": 0.8},
            {"label": "well it ends with .jpg", "score": 0.1},
            {"label": "idk, im just seeing bytes", "score": 0.05},
            {"label": "not sure", "score": 0.04},
            {"label": "probably a virus", "score": 0.01},
        ]

        def forward(
            inputs: Image.Image | list[Image.Image], **kwargs: Any
        ) -> list[dict[str, Any]] | list[list[dict[str, Any]]]:
            if isinstance(inputs, list) and not all([isinstance(img, Image.Image) for img in inputs]):
                raise TypeError
            elif not isinstance(inputs, Image.Image):
                raise TypeError

            if isinstance(inputs, list):
                return [classifier_preds] * len(inputs)

            return classifier_preds

        model.return_value = forward
        yield model


@pytest.fixture
def mock_st() -> Iterator[mock.Mock]:
    with mock.patch("app.models.clip.SentenceTransformer") as model:
        embedding = np.random.rand(512).astype(np.float32)

        def encode(inputs: Image.Image | list[Image.Image], **kwargs: Any) -> ndarray | list[ndarray]:
            # mypy complains unless isinstance(inputs, list) is used explicitly
            img_batch = isinstance(inputs, list) and all([isinstance(inst, Image.Image) for inst in inputs])
            text_batch = isinstance(inputs, list) and all([isinstance(inst, str) for inst in inputs])
            if isinstance(inputs, list) and not any([img_batch, text_batch]):
                raise TypeError

            if isinstance(inputs, list):
                return np.stack([embedding] * len(inputs))

            return embedding

        mocked = mock.Mock()
        mocked.encode = encode
        model.return_value = mocked
        yield model


@pytest.fixture
def mock_faceanalysis() -> Iterator[mock.Mock]:
    with mock.patch("app.models.facial_recognition.FaceAnalysis") as model:
        face_preds = [
            SimpleNamespace(  # this is so these fields can be accessed through dot notation
                **{
                    "bbox": np.random.rand(4).astype(np.float32),
                    "kps": np.random.rand(5, 2).astype(np.float32),
                    "det_score": np.array([0.67]).astype(np.float32),
                    "normed_embedding": np.random.rand(512).astype(np.float32),
                }
            ),
            SimpleNamespace(
                **{
                    "bbox": np.random.rand(4).astype(np.float32),
                    "kps": np.random.rand(5, 2).astype(np.float32),
                    "det_score": np.array([0.4]).astype(np.float32),
                    "normed_embedding": np.random.rand(512).astype(np.float32),
                }
            ),
        ]

        def get(image: np.ndarray[int, np.dtype[np.float32]], **kwargs: Any) -> list[SimpleNamespace]:
            if not isinstance(image, np.ndarray):
                raise TypeError

            return face_preds

        mocked = mock.Mock()
        mocked.get = get
        model.return_value = mocked
        yield model


@pytest.fixture
def mock_get_model() -> Iterator[mock.Mock]:
    with mock.patch("app.models.cache.InferenceModel.from_model_type", autospec=True) as mocked:
        yield mocked


@pytest.fixture(scope="session")
def deployed_app() -> TestClient:
    init_state()
    return TestClient(app)
@@ -24,9 +24,11 @@ from .schemas import (
 app = FastAPI()
 
 
-@app.on_event("startup")
-async def startup_event() -> None:
+def init_state() -> None:
     app.state.model_cache = ModelCache(ttl=settings.model_ttl, revalidate=True)
+
+
+async def load_models() -> None:
     models = [
         (settings.classification_model, ModelType.IMAGE_CLASSIFICATION),
         (settings.clip_image_model, ModelType.CLIP),
@@ -42,6 +44,12 @@ async def startup_event() -> None:
         InferenceModel.from_model_type(model_type, model_name)
 
 
+@app.on_event("startup")
+async def startup_event() -> None:
+    init_state()
+    await load_models()
+
+
 def dep_pil_image(byte_image: bytes = Body(...)) -> Image.Image:
     return Image.open(BytesIO(byte_image))
 
@@ -69,9 +77,7 @@ def ping() -> str:
 async def image_classification(
     image: Image.Image = Depends(dep_pil_image),
 ) -> list[str]:
-    model = await app.state.model_cache.get(
-        settings.classification_model, ModelType.IMAGE_CLASSIFICATION
-    )
+    model = await app.state.model_cache.get(settings.classification_model, ModelType.IMAGE_CLASSIFICATION)
     labels = model.predict(image)
     return labels
@@ -108,9 +114,7 @@ async def clip_encode_text(payload: TextModelRequest) -> list[float]:
 async def facial_recognition(
     image: cv2.Mat = Depends(dep_cv_image),
 ) -> list[dict[str, Any]]:
-    model = await app.state.model_cache.get(
-        settings.facial_recognition_model, ModelType.FACIAL_RECOGNITION
-    )
+    model = await app.state.model_cache.get(settings.facial_recognition_model, ModelType.FACIAL_RECOGNITION)
     faces = model.predict(image)
     return faces
@@ -5,7 +5,7 @@ from pathlib import Path
 from shutil import rmtree
 from typing import Any
 
-from onnxruntime.capi.onnxruntime_pybind11_state import InvalidProtobuf
+from onnxruntime.capi.onnxruntime_pybind11_state import InvalidProtobuf  # type: ignore
 
 from ..config import get_cache_dir
 from ..schemas import ModelType
@@ -14,15 +14,9 @@ from ..schemas import ModelType
 class InferenceModel(ABC):
     _model_type: ModelType
 
-    def __init__(
-        self, model_name: str, cache_dir: Path | None = None, **model_kwargs
-    ) -> None:
+    def __init__(self, model_name: str, cache_dir: Path | str | None = None, **model_kwargs: Any) -> None:
         self.model_name = model_name
-        self._cache_dir = (
-            cache_dir
-            if cache_dir is not None
-            else get_cache_dir(model_name, self.model_type)
-        )
+        self._cache_dir = Path(cache_dir) if cache_dir is not None else get_cache_dir(model_name, self.model_type)
 
         try:
             self.load(**model_kwargs)
@@ -51,12 +45,8 @@ class InferenceModel(ABC):
         self._cache_dir = cache_dir
 
     @classmethod
-    def from_model_type(
-        cls, model_type: ModelType, model_name, **model_kwargs
-    ) -> InferenceModel:
-        subclasses = {
-            subclass._model_type: subclass for subclass in cls.__subclasses__()
-        }
+    def from_model_type(cls, model_type: ModelType, model_name: str, **model_kwargs: Any) -> InferenceModel:
+        subclasses = {subclass._model_type: subclass for subclass in cls.__subclasses__()}
         if model_type not in subclasses:
             raise ValueError(f"Unsupported model type: {model_type}")
 
@@ -66,8 +56,6 @@
         if not self.cache_dir.exists():
             return
         elif not rmtree.avoids_symlink_attacks:
-            raise RuntimeError(
-                "Attempted to clear cache, but rmtree is not safe on this platform."
-            )
+            raise RuntimeError("Attempted to clear cache, but rmtree is not safe on this platform.")
 
         rmtree(self.cache_dir)
@@ -1,4 +1,5 @@
 import asyncio
+from typing import Any
 
 from aiocache.backends.memory import SimpleMemoryCache
 from aiocache.lock import OptimisticLock
@@ -34,13 +35,9 @@ class ModelCache:
         if profiling:
             plugins.append(TimingPlugin())
 
-        self.cache = SimpleMemoryCache(
-            ttl=ttl, timeout=timeout, plugins=plugins, namespace=None
-        )
+        self.cache = SimpleMemoryCache(ttl=ttl, timeout=timeout, plugins=plugins, namespace=None)
 
-    async def get(
-        self, model_name: str, model_type: ModelType, **model_kwargs
-    ) -> InferenceModel:
+    async def get(self, model_name: str, model_type: ModelType, **model_kwargs: Any) -> InferenceModel:
         """
         Args:
             model_name: Name of model in the model hub used for the task.
@@ -56,9 +53,7 @@ class ModelCache:
         async with OptimisticLock(self.cache, key) as lock:
             model = await asyncio.get_running_loop().run_in_executor(
                 None,
-                lambda: InferenceModel.from_model_type(
-                    model_type, model_name, **model_kwargs
-                ),
+                lambda: InferenceModel.from_model_type(model_type, model_name, **model_kwargs),
             )
             await lock.cas(model, ttl=self.ttl)
         return model
@@ -73,7 +68,14 @@
 class RevalidationPlugin(BasePlugin):
     """Revalidates cache item's TTL after cache hit."""
 
-    async def post_get(self, client, key, ret=None, namespace=None, **kwargs):
+    async def post_get(
+        self,
+        client: SimpleMemoryCache,
+        key: str,
+        ret: Any | None = None,
+        namespace: str | None = None,
+        **kwargs: Any,
+    ) -> None:
         if ret is None:
             return
         if namespace is not None:
@@ -81,7 +83,14 @@ class RevalidationPlugin(BasePlugin):
         if key in client._handlers:
             await client.expire(key, client.ttl)
 
-    async def post_multi_get(self, client, keys, ret=None, namespace=None, **kwargs):
+    async def post_multi_get(
+        self,
+        client: SimpleMemoryCache,
+        keys: list[str],
+        ret: list[Any] | None = None,
+        namespace: str | None = None,
+        **kwargs: Any,
+    ) -> None:
         if ret is None:
             return
 
@@ -16,8 +16,8 @@ class FaceRecognizer(InferenceModel):
         self,
         model_name: str,
         min_score: float = settings.min_face_score,
-        cache_dir: Path | None = None,
-        **model_kwargs,
+        cache_dir: Path | str | None = None,
+        **model_kwargs: Any,
     ) -> None:
         self.min_score = min_score
         super().__init__(model_name, cache_dir, **model_kwargs)
@@ -16,8 +16,8 @@ class ImageClassifier(InferenceModel):
         self,
         model_name: str,
         min_score: float = settings.min_tag_score,
-        cache_dir: Path | None = None,
-        **model_kwargs,
+        cache_dir: Path | str | None = None,
+        **model_kwargs: Any,
     ) -> None:
         self.min_score = min_score
         super().__init__(model_name, cache_dir, **model_kwargs)
@@ -30,13 +30,7 @@
         )
 
     def predict(self, image: Image) -> list[str]:
-        predictions = self.model(image)
-        tags = list(
-            {
-                tag
-                for pred in predictions
-                for tag in pred["label"].split(", ")
-                if pred["score"] >= self.min_score
-            }
-        )
+        predictions: list[dict[str, Any]] = self.model(image)  # type: ignore
+        tags = [tag for pred in predictions for tag in pred["label"].split(", ") if pred["score"] >= self.min_score]
 
         return tags
@@ -4,10 +4,7 @@ from pydantic import BaseModel
 
 
 def to_lower_camel(string: str) -> str:
-    tokens = [
-        token.capitalize() if i > 0 else token
-        for i, token in enumerate(string.split("_"))
-    ]
+    tokens = [token.capitalize() if i > 0 else token for i, token in enumerate(string.split("_"))]
     return "".join(tokens)
 
 

machine-learning/app/test_main.py (new file, 183 lines)
@@ -0,0 +1,183 @@
from io import BytesIO
from pathlib import Path
from unittest import mock

import cv2
import pytest
from fastapi.testclient import TestClient
from PIL import Image

from .config import settings
from .models.cache import ModelCache
from .models.clip import CLIPSTEncoder
from .models.facial_recognition import FaceRecognizer
from .models.image_classification import ImageClassifier
from .schemas import ModelType


class TestImageClassifier:
    def test_init(self, mock_classifier_pipeline: mock.Mock) -> None:
        cache_dir = Path("test_cache")
        classifier = ImageClassifier("test_model_name", 0.5, cache_dir=cache_dir)

        assert classifier.min_score == 0.5
        mock_classifier_pipeline.assert_called_once_with(
            "image-classification",
            "test_model_name",
            model_kwargs={"cache_dir": cache_dir},
        )

    def test_min_score(self, pil_image: Image.Image, mock_classifier_pipeline: mock.Mock) -> None:
        classifier = ImageClassifier("test_model_name", min_score=0.0)
        classifier.min_score = 0.0
        all_labels = classifier.predict(pil_image)
        classifier.min_score = 0.5
        filtered_labels = classifier.predict(pil_image)

        assert all_labels == [
            "that's an image alright",
            "well it ends with .jpg",
            "idk",
            "im just seeing bytes",
            "not sure",
            "probably a virus",
        ]
        assert filtered_labels == ["that's an image alright"]


class TestCLIP:
    def test_init(self, mock_st: mock.Mock) -> None:
        CLIPSTEncoder("test_model_name", cache_dir="test_cache")

        mock_st.assert_called_once_with("test_model_name", cache_folder="test_cache")

    def test_basic_image(self, pil_image: Image.Image, mock_st: mock.Mock) -> None:
        clip_encoder = CLIPSTEncoder("test_model_name", cache_dir="test_cache")
        embedding = clip_encoder.predict(pil_image)

        assert isinstance(embedding, list)
        assert len(embedding) == 512
        assert all([isinstance(num, float) for num in embedding])
        mock_st.assert_called_once()

    def test_basic_text(self, mock_st: mock.Mock) -> None:
        clip_encoder = CLIPSTEncoder("test_model_name", cache_dir="test_cache")
        embedding = clip_encoder.predict("test search query")

        assert isinstance(embedding, list)
        assert len(embedding) == 512
        assert all([isinstance(num, float) for num in embedding])
        mock_st.assert_called_once()


class TestFaceRecognition:
    def test_init(self, mock_faceanalysis: mock.Mock) -> None:
        FaceRecognizer("test_model_name", cache_dir="test_cache")

        mock_faceanalysis.assert_called_once_with(
            name="test_model_name",
            root="test_cache",
            allowed_modules=["detection", "recognition"],
        )

    def test_basic(self, cv_image: cv2.Mat, mock_faceanalysis: mock.Mock) -> None:
        face_recognizer = FaceRecognizer("test_model_name", min_score=0.0, cache_dir="test_cache")
        faces = face_recognizer.predict(cv_image)

        assert len(faces) == 2
        for face in faces:
            assert face["imageHeight"] == 800
            assert face["imageWidth"] == 600
            assert isinstance(face["embedding"], list)
            assert len(face["embedding"]) == 512
            assert all([isinstance(num, float) for num in face["embedding"]])

        mock_faceanalysis.assert_called_once()


@pytest.mark.asyncio
class TestCache:
    async def test_caches(self, mock_get_model: mock.Mock) -> None:
        model_cache = ModelCache()
        await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION)
        await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION)
        assert len(model_cache.cache._cache) == 1
        mock_get_model.assert_called_once()

    async def test_kwargs_used(self, mock_get_model: mock.Mock) -> None:
        model_cache = ModelCache()
        await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION, cache_dir="test_cache")
        mock_get_model.assert_called_once_with(
            ModelType.IMAGE_CLASSIFICATION, "test_model_name", cache_dir="test_cache"
        )

    async def test_different_clip(self, mock_get_model: mock.Mock) -> None:
        model_cache = ModelCache()
        await model_cache.get("test_image_model_name", ModelType.CLIP)
        await model_cache.get("test_text_model_name", ModelType.CLIP)
        mock_get_model.assert_has_calls(
            [
                mock.call(ModelType.CLIP, "test_image_model_name"),
                mock.call(ModelType.CLIP, "test_text_model_name"),
            ]
        )
        assert len(model_cache.cache._cache) == 2

    @mock.patch("app.models.cache.OptimisticLock", autospec=True)
    async def test_model_ttl(self, mock_lock_cls: mock.Mock, mock_get_model: mock.Mock) -> None:
        model_cache = ModelCache(ttl=100)
        await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION)
        mock_lock_cls.return_value.__aenter__.return_value.cas.assert_called_with(mock.ANY, ttl=100)

    @mock.patch("app.models.cache.SimpleMemoryCache.expire")
    async def test_revalidate(self, mock_cache_expire: mock.Mock, mock_get_model: mock.Mock) -> None:
        model_cache = ModelCache(ttl=100, revalidate=True)
        await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION)
        await model_cache.get("test_model_name", ModelType.IMAGE_CLASSIFICATION)
        mock_cache_expire.assert_called_once_with(mock.ANY, 100)


@pytest.mark.skipif(
    not settings.test_full,
    reason="More time-consuming since it deploys the app and loads models.",
)
class TestEndpoints:
    def test_tagging_endpoint(self, pil_image: Image.Image, deployed_app: TestClient) -> None:
        byte_image = BytesIO()
        pil_image.save(byte_image, format="jpeg")
        headers = {"Content-Type": "image/jpg"}
        response = deployed_app.post(
            "http://localhost:3003/image-classifier/tag-image",
            content=byte_image.getvalue(),
            headers=headers,
        )
        assert response.status_code == 200

    def test_clip_image_endpoint(self, pil_image: Image.Image, deployed_app: TestClient) -> None:
        byte_image = BytesIO()
        pil_image.save(byte_image, format="jpeg")
        headers = {"Content-Type": "image/jpg"}
        response = deployed_app.post(
            "http://localhost:3003/sentence-transformer/encode-image",
            content=byte_image.getvalue(),
            headers=headers,
        )
        assert response.status_code == 200

    def test_clip_text_endpoint(self, deployed_app: TestClient) -> None:
        response = deployed_app.post(
            "http://localhost:3003/sentence-transformer/encode-text",
            json={"text": "test search query"},
        )
        assert response.status_code == 200

    def test_face_endpoint(self, pil_image: Image.Image, deployed_app: TestClient) -> None:
        byte_image = BytesIO()
        pil_image.save(byte_image, format="jpeg")
        headers = {"Content-Type": "image/jpg"}
        response = deployed_app.post(
            "http://localhost:3003/facial-recognition/detect-faces",
            content=byte_image.getvalue(),
            headers=headers,
        )
        assert response.status_code == 200