/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

"use strict";

Object.defineProperty(exports, "__esModule", { value: true });
exports.DEFAULT_TRAINED_MODELS_PAGE_SIZE = void 0;
exports.trainedModelsRoutes = trainedModelsRoutes;

var _configSchema = require("@kbn/config-schema");
var _app = require("../../common/constants/app");
var _error_wrapper = require("../client/error_wrapper");
var _inference_schema = require("./schemas/inference_schema");
var _log = require("../lib/log");
var _anomaly_detectors_schema = require("./schemas/anomaly_detectors_schema");
var _model_management = require("../models/model_management");

// Page size used for every "fetch all" call against the trained models APIs,
// so the models list and the stats list always cover the same population.
const DEFAULT_TRAINED_MODELS_PAGE_SIZE = 10000;
exports.DEFAULT_TRAINED_MODELS_PAGE_SIZE = DEFAULT_TRAINED_MODELS_PAGE_SIZE;

/**
 * Registers all internal trained-models HTTP routes on the versioned router.
 *
 * @param {object} deps
 * @param {object} deps.router - Kibana versioned router.
 * @param {object} deps.routeGuard - Guard factory providing `fullLicenseAPIGuard`.
 */
function trainedModelsRoutes({ router, routeGuard }) {
  /**
   * @apiGroup TrainedModels
   *
   * @api {get} /internal/ml/trained_models/:modelId Get info of a trained inference model
   * @apiName GetTrainedModel
   * @apiDescription Retrieves configuration information for a trained model.
   */
  router.versioned
    .get({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/{modelId?}`,
      access: 'internal',
      options: {
        tags: ['access:ml:canGetTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: {
          request: {
            params: _inference_schema.optionalModelIdSchema,
            query: _inference_schema.getInferenceQuerySchema
          }
        }
      },
      routeGuard.fullLicenseAPIGuard(async ({ client, mlClient, request, response }) => {
        try {
          const { modelId } = request.params;
          const { with_pipelines: withPipelines, ...query } = request.query;
          const body = await mlClient.getTrainedModels({
            ...query,
            ...(modelId ? { model_id: modelId } : {}),
            size: DEFAULT_TRAINED_MODELS_PAGE_SIZE
          });
          // model_type is missing
          // @ts-ignore
          const result = body.trained_model_configs;
          try {
            if (withPipelines) {
              // Also need to retrieve the list of deployment IDs from stats.
              // Use the shared page size so the stats fetch covers the same
              // set of models as the configs fetch above.
              const stats = await mlClient.getTrainedModelsStats({
                ...(modelId ? { model_id: modelId } : {}),
                size: DEFAULT_TRAINED_MODELS_PAGE_SIZE
              });
              // model_id -> [deployment_id, ...] for every model with an active deployment.
              const modelDeploymentsMap = stats.trained_model_stats.reduce((acc, curr) => {
                if (!curr.deployment_stats) return acc;
                // @ts-ignore elasticsearch-js client is missing deployment_id
                const deploymentId = curr.deployment_stats.deployment_id;
                if (acc[curr.model_id]) {
                  acc[curr.model_id].push(deploymentId);
                } else {
                  acc[curr.model_id] = [deploymentId];
                }
                return acc;
              }, {});
              // Pipelines may reference a model by id, by alias, or by deployment id,
              // so collect all three before querying.
              const modelIdsAndAliases = Array.from(
                new Set([
                  ...result
                    .map(({ model_id: id, metadata }) => [id, ...(metadata?.model_aliases ?? [])])
                    .flat(),
                  ...Object.values(modelDeploymentsMap).flat()
                ])
              );
              const pipelinesResponse = await (0, _model_management.modelsProvider)(
                client
              ).getModelsPipelines(modelIdsAndAliases);
              for (const model of result) {
                // Merge pipelines keyed by model id, each alias, and each deployment id.
                model.pipelines = {
                  ...(pipelinesResponse.get(model.model_id) ?? {}),
                  ...(model.metadata?.model_aliases ?? []).reduce(
                    (acc, alias) => Object.assign(acc, pipelinesResponse.get(alias) ?? {}),
                    {}
                  ),
                  ...(modelDeploymentsMap[model.model_id] ?? []).reduce(
                    (acc, deploymentId) =>
                      Object.assign(acc, pipelinesResponse.get(deploymentId) ?? {}),
                    {}
                  )
                };
              }
            }
          } catch (e) {
            // the user might not have required permissions to fetch pipelines
            // log the error to the debug log as this might be a common situation and
            // we don't need to fill kibana's log with these messages.
            _log.mlLog.debug(e);
          }
          return response.ok({
            body: result
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {get} /internal/ml/trained_models/_stats Get stats for all trained models
   * @apiName GetTrainedModelStats
   * @apiDescription Retrieves usage information for all trained models.
   */
  router.versioned
    .get({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/_stats`,
      access: 'internal',
      options: {
        tags: ['access:ml:canGetTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: false
      },
      routeGuard.fullLicenseAPIGuard(async ({ mlClient, request, response }) => {
        try {
          const body = await mlClient.getTrainedModelsStats({
            size: DEFAULT_TRAINED_MODELS_PAGE_SIZE
          });
          return response.ok({
            body
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {get} /internal/ml/trained_models/:modelId/_stats Get stats of a trained model
   * @apiName GetTrainedModelStatsById
   * @apiDescription Retrieves usage information for trained models.
   */
  router.versioned
    .get({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/{modelId}/_stats`,
      access: 'internal',
      options: {
        tags: ['access:ml:canGetTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: {
          request: {
            params: _inference_schema.modelIdSchema
          }
        }
      },
      routeGuard.fullLicenseAPIGuard(async ({ mlClient, request, response }) => {
        try {
          const { modelId } = request.params;
          const body = await mlClient.getTrainedModelsStats({
            ...(modelId ? { model_id: modelId } : {})
          });
          return response.ok({
            body
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {get} /internal/ml/trained_models/:modelId/pipelines Get trained model pipelines
   * @apiName GetTrainedModelPipelines
   * @apiDescription Retrieves pipelines associated with a trained model
   */
  router.versioned
    .get({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/{modelId}/pipelines`,
      access: 'internal',
      options: {
        tags: ['access:ml:canGetTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: {
          request: {
            params: _inference_schema.modelIdSchema
          }
        }
      },
      routeGuard.fullLicenseAPIGuard(async ({ client, request, mlClient, response }) => {
        try {
          const { modelId } = request.params;
          // modelId may be a comma-separated list of ids.
          const result = await (0, _model_management.modelsProvider)(client).getModelsPipelines(
            modelId.split(',')
          );
          return response.ok({
            body: [...result].map(([id, pipelines]) => ({
              model_id: id,
              pipelines
            }))
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {get} /internal/ml/trained_models/ingest_pipelines Get ingest pipelines
   * @apiName GetIngestPipelines
   * @apiDescription Retrieves pipelines
   */
  router.versioned
    .get({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/ingest_pipelines`,
      access: 'internal',
      options: {
        tags: ['access:ml:canGetTrainedModels'] // TODO: update permissions
      }
    })
    .addVersion(
      {
        version: '1',
        validate: false
      },
      routeGuard.fullLicenseAPIGuard(async ({ client, request, mlClient, response }) => {
        try {
          const body = await (0, _model_management.modelsProvider)(client).getPipelines();
          return response.ok({
            body
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {post} /internal/ml/trained_models/create_inference_pipeline creates the pipeline with inference processor
   * @apiName CreateInferencePipeline
   * @apiDescription Creates the inference pipeline
   */
  router.versioned
    .post({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/create_inference_pipeline`,
      access: 'internal',
      options: {
        tags: ['access:ml:canCreateTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: {
          request: {
            body: _inference_schema.createIngestPipelineSchema
          }
        }
      },
      routeGuard.fullLicenseAPIGuard(async ({ client, request, mlClient, response }) => {
        try {
          const { pipeline, pipelineName } = request.body;
          const body = await (0, _model_management.modelsProvider)(client).createInferencePipeline(
            pipeline,
            pipelineName
          );
          return response.ok({
            body
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {put} /internal/ml/trained_models/:modelId Put a trained model
   * @apiName PutTrainedModel
   * @apiDescription Adds a new trained model
   */
  router.versioned
    .put({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/{modelId}`,
      access: 'internal',
      options: {
        tags: ['access:ml:canCreateTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: {
          request: {
            params: _inference_schema.modelIdSchema,
            body: _configSchema.schema.any(),
            query: _inference_schema.putTrainedModelQuerySchema
          }
        }
      },
      routeGuard.fullLicenseAPIGuard(async ({ mlClient, request, response }) => {
        try {
          const { modelId } = request.params;
          const body = await mlClient.putTrainedModel({
            model_id: modelId,
            body: request.body,
            // Only forward the flag when explicitly truthy in the query.
            ...(request.query?.defer_definition_decompression
              ? { defer_definition_decompression: true }
              : {})
          });
          return response.ok({
            body
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {delete} /internal/ml/trained_models/:modelId Delete a trained model
   * @apiName DeleteTrainedModel
   * @apiDescription Deletes an existing trained model that is currently not referenced by an ingest pipeline.
   */
  router.versioned
    .delete({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/{modelId}`,
      access: 'internal',
      options: {
        tags: ['access:ml:canDeleteTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: {
          request: {
            params: _inference_schema.modelIdSchema,
            query: _inference_schema.deleteTrainedModelQuerySchema
          }
        }
      },
      routeGuard.fullLicenseAPIGuard(async ({ mlClient, request, response, client }) => {
        try {
          const { modelId } = request.params;
          const { with_pipelines: withPipelines, force } = request.query;
          if (withPipelines) {
            // first we need to delete pipelines, otherwise ml api return an error
            await (0, _model_management.modelsProvider)(client).deleteModelPipelines(
              modelId.split(',')
            );
          }
          const body = await mlClient.deleteTrainedModel({
            model_id: modelId,
            force
          });
          return response.ok({
            body
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {post} /internal/ml/trained_models/:modelId/deployment/_start Start trained model deployment
   * @apiName StartTrainedModelDeployment
   * @apiDescription Starts trained model deployment.
   */
  router.versioned
    .post({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/{modelId}/deployment/_start`,
      access: 'internal',
      options: {
        tags: ['access:ml:canStartStopTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: {
          request: {
            params: _inference_schema.modelIdSchema,
            query: _inference_schema.threadingParamsSchema
          }
        }
      },
      routeGuard.fullLicenseAPIGuard(async ({ mlClient, request, response }) => {
        try {
          const { modelId } = request.params;
          const body = await mlClient.startTrainedModelDeployment({
            model_id: modelId,
            ...(request.query ? request.query : {})
          });
          return response.ok({
            body
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {post} /internal/ml/trained_models/:modelId/deployment/_update Update trained model deployment
   * @apiName UpdateTrainedModelDeployment
   * @apiDescription Updates trained model deployment.
   */
  router.versioned
    .post({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/{modelId}/{deploymentId}/deployment/_update`,
      access: 'internal',
      options: {
        tags: ['access:ml:canStartStopTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: {
          request: {
            params: _inference_schema.modelAndDeploymentIdSchema,
            body: _inference_schema.updateDeploymentParamsSchema
          }
        }
      },
      routeGuard.fullLicenseAPIGuard(async ({ mlClient, request, response }) => {
        try {
          const { modelId, deploymentId } = request.params;
          const body = await mlClient.updateTrainedModelDeployment({
            model_id: modelId,
            deployment_id: deploymentId,
            ...request.body
          });
          return response.ok({
            body
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {post} /internal/ml/trained_models/:modelId/deployment/_stop Stop trained model deployment
   * @apiName StopTrainedModelDeployment
   * @apiDescription Stops trained model deployment.
   */
  router.versioned
    .post({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/{modelId}/{deploymentId}/deployment/_stop`,
      access: 'internal',
      options: {
        tags: ['access:ml:canStartStopTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: {
          request: {
            params: _inference_schema.modelAndDeploymentIdSchema,
            query: _anomaly_detectors_schema.forceQuerySchema
          }
        }
      },
      routeGuard.fullLicenseAPIGuard(async ({ mlClient, request, response }) => {
        try {
          const { deploymentId, modelId } = request.params;
          // Stop each requested deployment independently so one failure does
          // not abort the rest; report per-deployment success/error.
          const results = {};
          for (const id of deploymentId.split(',')) {
            try {
              const { stopped: success } = await mlClient.stopTrainedModelDeployment({
                model_id: modelId,
                deployment_id: id,
                force: request.query.force ?? false,
                allow_no_match: false
              });
              results[id] = {
                success
              };
            } catch (error) {
              results[id] = {
                success: false,
                error
              };
            }
          }
          return response.ok({
            body: results
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {post} /internal/ml/trained_models/pipeline_simulate Simulates an ingest pipeline
   * @apiName SimulateIngestPipeline
   * @apiDescription Simulates an ingest pipeline.
   */
  router.versioned
    .post({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/pipeline_simulate`,
      access: 'internal',
      options: {
        tags: ['access:ml:canTestTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: {
          request: {
            body: _inference_schema.pipelineSimulateBody
          }
        }
      },
      routeGuard.fullLicenseAPIGuard(async ({ client, request, response }) => {
        try {
          const { pipeline, docs } = request.body;
          const body = await client.asInternalUser.ingest.simulate({
            pipeline,
            docs
          });
          return response.ok({
            body
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );

  /**
   * @apiGroup TrainedModels
   *
   * @api {post} /internal/ml/trained_models/infer/:modelId Evaluates a trained model
   * @apiName InferTrainedModelDeployment
   * @apiDescription Evaluates a trained model.
   */
  router.versioned
    .post({
      path: `${_app.ML_INTERNAL_BASE_PATH}/trained_models/infer/{modelId}/{deploymentId}`,
      access: 'internal',
      options: {
        tags: ['access:ml:canTestTrainedModels']
      }
    })
    .addVersion(
      {
        version: '1',
        validate: {
          request: {
            params: _inference_schema.modelAndDeploymentIdSchema,
            query: _inference_schema.inferTrainedModelQuery,
            body: _inference_schema.inferTrainedModelBody
          }
        }
      },
      routeGuard.fullLicenseAPIGuard(async ({ mlClient, request, response }) => {
        try {
          const { modelId, deploymentId } = request.params;
          const body = await mlClient.inferTrainedModel({
            model_id: modelId,
            deployment_id: deploymentId,
            body: {
              docs: request.body.docs,
              ...(request.body.inference_config
                ? { inference_config: request.body.inference_config }
                : {})
            },
            ...(request.query.timeout ? { timeout: request.query.timeout } : {})
          });
          return response.ok({
            body
          });
        } catch (e) {
          return response.customError((0, _error_wrapper.wrapError)(e));
        }
      })
    );
}