feat(web,server)!: configure machine learning via the UI (#3768)
@@ -37,6 +37,12 @@ export enum SystemConfigKey {
   JOB_SEARCH_CONCURRENCY = 'job.search.concurrency',
   JOB_SIDECAR_CONCURRENCY = 'job.sidecar.concurrency',

+  MACHINE_LEARNING_ENABLED = 'machineLearning.enabled',
+  MACHINE_LEARNING_URL = 'machineLearning.url',
+  MACHINE_LEARNING_FACIAL_RECOGNITION_ENABLED = 'machineLearning.facialRecognitionEnabled',
+  MACHINE_LEARNING_TAG_IMAGE_ENABLED = 'machineLearning.tagImageEnabled',
+  MACHINE_LEARNING_CLIP_ENCODE_ENABLED = 'machineLearning.clipEncodeEnabled',
+
   OAUTH_ENABLED = 'oauth.enabled',
   OAUTH_ISSUER_URL = 'oauth.issuerUrl',
   OAUTH_CLIENT_ID = 'oauth.clientId',
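For context, each SystemConfigKey is a flat, dotted path into the nested SystemConfig shape shown in the next hunk. A minimal sketch of how such a key/value override can be applied to a nested config object follows; the helper name setNestedValue and the sample values are illustrative assumptions, not code from this PR.

// Illustrative only: applying a dotted key such as 'machineLearning.url'
// to a nested config object. Helper and values are placeholders.
type AnyRecord = Record<string, any>;

function setNestedValue(target: AnyRecord, dottedKey: string, value: unknown): void {
  const parts = dottedKey.split('.');
  let node = target;
  for (const part of parts.slice(0, -1)) {
    node[part] = node[part] ?? {};
    node = node[part];
  }
  node[parts[parts.length - 1]] = value;
}

const config: AnyRecord = {};
setNestedValue(config, 'machineLearning.enabled', true);
setNestedValue(config, 'machineLearning.url', 'http://immich-machine-learning:3003'); // sample URL
console.log(config); // { machineLearning: { enabled: true, url: 'http://immich-machine-learning:3003' } }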
@@ -105,6 +111,13 @@ export interface SystemConfig {
     tonemap: ToneMapping;
   };
   job: Record<QueueName, { concurrency: number }>;
+  machineLearning: {
+    enabled: boolean;
+    url: string;
+    clipEncodeEnabled: boolean;
+    facialRecognitionEnabled: boolean;
+    tagImageEnabled: boolean;
+  };
   oauth: {
     enabled: boolean;
     issuerUrl: string;
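With this change a full SystemConfig value carries a machineLearning block. A hedged sketch of a fragment satisfying the new block is below; it assumes SystemConfig is exported from @app/domain, and the values are placeholders rather than the defaults shipped by this PR.

// Illustrative fragment typed against the new `machineLearning` block.
// Values are placeholders, not the project's actual defaults.
import { SystemConfig } from '@app/domain';

const machineLearningDefaults: SystemConfig['machineLearning'] = {
  enabled: true,
  url: 'http://immich-machine-learning:3003', // sample URL, configurable via the UI
  clipEncodeEnabled: true,
  facialRecognitionEnabled: true,
  tagImageEnabled: true,
};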
@@ -1,9 +1,9 @@
-import { DetectFaceResult, IMachineLearningRepository, MachineLearningInput, MACHINE_LEARNING_URL } from '@app/domain';
+import { DetectFaceResult, IMachineLearningRepository, MachineLearningInput } from '@app/domain';
 import { Injectable } from '@nestjs/common';
 import axios from 'axios';
 import { createReadStream } from 'fs';

-const client = axios.create({ baseURL: MACHINE_LEARNING_URL });
+const client = axios.create();

 @Injectable()
 export class MachineLearningRepository implements IMachineLearningRepository {
@@ -11,19 +11,19 @@ export class MachineLearningRepository implements IMachineLearningRepository {
     return client.post<T>(endpoint, createReadStream(input.imagePath)).then((res) => res.data);
   }

-  classifyImage(input: MachineLearningInput): Promise<string[]> {
-    return this.post<string[]>(input, '/image-classifier/tag-image');
+  classifyImage(url: string, input: MachineLearningInput): Promise<string[]> {
+    return this.post<string[]>(input, `${url}/image-classifier/tag-image`);
   }

-  detectFaces(input: MachineLearningInput): Promise<DetectFaceResult[]> {
-    return this.post<DetectFaceResult[]>(input, '/facial-recognition/detect-faces');
+  detectFaces(url: string, input: MachineLearningInput): Promise<DetectFaceResult[]> {
+    return this.post<DetectFaceResult[]>(input, `${url}/facial-recognition/detect-faces`);
   }

-  encodeImage(input: MachineLearningInput): Promise<number[]> {
-    return this.post<number[]>(input, '/sentence-transformer/encode-image');
+  encodeImage(url: string, input: MachineLearningInput): Promise<number[]> {
+    return this.post<number[]>(input, `${url}/sentence-transformer/encode-image`);
   }

-  encodeText(input: string): Promise<number[]> {
-    return client.post<number[]>('/sentence-transformer/encode-text', { text: input }).then((res) => res.data);
+  encodeText(url: string, input: string): Promise<number[]> {
+    return client.post<number[]>(`${url}/sentence-transformer/encode-text`, { text: input }).then((res) => res.data);
   }
 }
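Since the axios client no longer has a baseURL, every call must receive the machine-learning URL from configuration. A minimal sketch of the resulting call pattern from a hypothetical caller is below; the function name, the config accessor, and the guard flags are assumptions for illustration, while the actual wiring is done in the domain services touched elsewhere in this PR.

// Hypothetical caller: reads machine-learning settings from SystemConfig and
// passes the configured URL as the new first argument of the repository method.
// Everything except the repository method signature is a placeholder.
import { IMachineLearningRepository, MachineLearningInput, SystemConfig } from '@app/domain';

async function tagAsset(
  repository: IMachineLearningRepository,
  config: SystemConfig,
  input: MachineLearningInput,
): Promise<string[]> {
  const { enabled, url, tagImageEnabled } = config.machineLearning;
  if (!enabled || !tagImageEnabled) {
    return []; // machine learning (or image tagging) has been disabled via the UI
  }
  // The URL now comes from configuration instead of a process-wide constant.
  return repository.classifyImage(url, input);
}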