feat(ml): export clip models to ONNX and host models on Hugging Face (#4700)

* export clip models

* export to hf

refactored export code

* export mclip, general refactoring

cleanup

* updated conda deps

* do transforms with pillow and numpy, add tokenization config to export, general refactoring

* moved conda dockerfile, re-added poetry

* minor fixes

* updated link

* updated tests

* removed `requirements.txt` from workflow

* fixed mimalloc path

* removed torchvision

* cleaner np typing

* review suggestions

* update default model name

* update test

Author: Mert
Date: 2023-10-31 06:02:04 -04:00 (committed by GitHub)
Parent: 3212a47720
Commit: 87a0ba3db3
29 changed files with 6192 additions and 2043 deletions
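
As a rough illustration of the export work described in the commit message, the sketch below traces the visual half of a CLIP model to ONNX using open_clip and `torch.onnx.export`. This is a minimal sketch under stated assumptions, not the PR's actual export code: the wrapper class, the output file name, and the fixed 224x224 input are all illustrative.

```python
# Minimal sketch: export a CLIP visual encoder to ONNX with open_clip.
# Assumptions (not from the PR): wrapper class, file name, input size.
import torch
import open_clip


class VisualWrapper(torch.nn.Module):
    """Expose encode_image() as forward() so ONNX tracing can see it."""

    def __init__(self, model: torch.nn.Module) -> None:
        super().__init__()
        self.model = model

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        return self.model.encode_image(image)


model, _, _ = open_clip.create_model_and_transforms("ViT-B-32", pretrained="openai")
model.eval()

torch.onnx.export(
    VisualWrapper(model),
    torch.randn(1, 3, 224, 224),           # dummy image batch in NCHW layout
    "visual.onnx",
    input_names=["image"],
    output_names=["embedding"],
    dynamic_axes={"image": {0: "batch"}},  # allow variable batch size at runtime
)
```

The text encoder can be exported the same way by wrapping `encode_text` and feeding a dummy token-id tensor instead of an image.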

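The commit message also mentions doing the image transforms with Pillow and NumPy so that torchvision can be removed. Below is a minimal sketch of that idea, assuming the standard OpenAI CLIP preprocessing (bicubic resize of the short side, center crop, scale to [0, 1], per-channel normalization); the function name and signature are illustrative, not the PR's.

```python
# Minimal sketch: CLIP-style preprocessing with Pillow + NumPy only.
# MEAN/STD are the published CLIP normalization constants.
import numpy as np
from PIL import Image

MEAN = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
STD = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)


def preprocess(path: str, size: int = 224) -> np.ndarray:
    img = Image.open(path).convert("RGB")

    # Resize so the short side equals `size`, then center-crop to size x size.
    scale = size / min(img.size)
    img = img.resize((round(img.width * scale), round(img.height * scale)), Image.BICUBIC)
    left, top = (img.width - size) // 2, (img.height - size) // 2
    img = img.crop((left, top, left + size, top + size))

    # HWC uint8 -> normalized float32 -> CHW with a leading batch axis.
    arr = (np.asarray(img, dtype=np.float32) / 255.0 - MEAN) / STD
    return np.expand_dims(arr.transpose(2, 0, 1), 0)
```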

@@ -193,7 +193,7 @@ describe(SmartInfoService.name, () => {
 expect(machineMock.encodeImage).toHaveBeenCalledWith(
   'http://immich-machine-learning:3003',
   { imagePath: 'path/to/resize.ext' },
-  { enabled: true, modelName: 'ViT-B-32::openai' },
+  { enabled: true, modelName: 'ViT-B-32__openai' },
 );
 expect(smartMock.upsert).toHaveBeenCalledWith({
   assetId: 'asset-1',


@@ -67,7 +67,7 @@ export const defaults = Object.freeze<SystemConfig>({
 },
 clip: {
   enabled: true,
-  modelName: 'ViT-B-32::openai',
+  modelName: 'ViT-B-32__openai',
 },
 facialRecognition: {
   enabled: true,


@@ -68,7 +68,7 @@ const updatedConfig = Object.freeze<SystemConfig>({
 },
 clip: {
   enabled: true,
-  modelName: 'ViT-B-32::openai',
+  modelName: 'ViT-B-32__openai',
 },
 facialRecognition: {
   enabled: true,
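
All three hunks above are the same rename: the `::` separator in the default CLIP model name becomes `__`. A plausible reason, though the diff does not state it, is that the model name now has to double as part of a Hugging Face repository id, and `:` is not a legal character in repo names while `_` is. Below is a hypothetical helper in that spirit; the `immich-app` owner, the `download_model` name, and the normalization of legacy `::` names are all assumptions, not code from the PR.

```python
# Hypothetical sketch: resolve a model name like 'ViT-B-32__openai' to a
# local snapshot of a Hugging Face repo. Owner and helper name are assumed.
from huggingface_hub import snapshot_download


def download_model(model_name: str, cache_dir: str = "/cache") -> str:
    # Tolerate legacy '::' names by normalizing them to the new '__' form.
    repo_name = model_name.replace("::", "__")
    return snapshot_download(f"immich-app/{repo_name}", cache_dir=cache_dir)
```

The same constraint likely applies to on-disk cache paths, where `:` is problematic on some filesystems.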