From dd12dcda86931dfa7e98dcdb3e2980f3ad870230 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 9 Jun 2025 06:04:14 +0000 Subject: [PATCH] Auto-generated API code --- docs/reference/api-reference.md | 45 +++++- src/api/api/indices.ts | 2 +- src/api/api/inference.ts | 2 +- src/api/types.ts | 235 +++++++++++++++++++++++++++++--- 4 files changed, 258 insertions(+), 26 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 9291c1b52..399c2a81c 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -1651,7 +1651,7 @@ client.search({ ... }) - **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. - **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL. - **`rescore` (Optional, { window_size, query, learning_to_rank } \| { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. -- **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. +- **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule, rescorer, linear, pinned })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. - **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. - **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: Used to retrieve the next page of hits using a set of sort values from the previous page. - **`size` (Optional, number)**: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. @@ -6755,9 +6755,45 @@ Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. -The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. +The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. 
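The `retriever` option documented in the `client.search` section above now accepts `linear`, `pinned`, and `rescorer` variants in addition to the existing ones. As a minimal sketch of the new weighted linear combination, not part of the generated patch; the index name, fields, query vector, and weights are illustrative assumptions:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Combine a lexical retriever and a kNN retriever with the new
  // `linear` retriever; scores are min-max normalized before weighting.
  const result = await client.search({
    index: 'my-index', // assumed index
    retriever: {
      linear: {
        rank_window_size: 100,
        retrievers: [
          {
            retriever: { standard: { query: { match: { title: 'elasticsearch' } } } },
            weight: 0.7,
            normalizer: 'minmax'
          },
          {
            retriever: {
              knn: {
                field: 'title_embedding', // assumed dense_vector field
                query_vector: [0.1, 0.2, 0.3],
                k: 10,
                num_candidates: 50
              }
            },
            weight: 0.3,
            normalizer: 'minmax'
          }
        ]
      }
    }
  })
  console.log(result.hits.hits)
}

run().catch(console.error)
```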
To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.
+
There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:
+
+```
+{
+  "number_of_replicas": 1
+}
+```
+
+Or you can use an `index` setting object:
+```
+{
+  "index": {
+    "number_of_replicas": 1
+  }
+}
+```
+
+Or you can use dot notation:
+```
+{
+  "index.number_of_replicas": 1
+}
+```
+
+Or you can embed any of the aforementioned options in a `settings` object. For example:
+
+```
+{
+  "settings": {
+    "index": {
+      "number_of_replicas": 1
+    }
+  }
+}
+```
+
NOTE: You can only define new analyzers on closed indices.
To add an analyzer, you must close the index, define the analyzer, and reopen the index.
You cannot close the write index of a data stream.
@@ -7470,12 +7506,9 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`.

## client.inference.chatCompletionUnified [_inference.chat_completion_unified]

Perform chat completion inference

-The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
+The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services.

-IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
-For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
-
NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming.
The Chat completion inference API and the Stream inference API differ in their response structure and capabilities.
The Chat completion inference API provides more comprehensive customization options through more fields and function calling support.

If you use the `openai` service or the `elastic` service, use the Chat completion inference API.
diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts
index 2f0c64f39..d393b0d32 100644
--- a/src/api/api/indices.ts
+++ b/src/api/api/indices.ts
@@ -3165,7 +3165,7 @@ export default class Indices {
  }

  /**
-   * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
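A minimal sketch of issuing the dynamic-settings update described in this doc comment through the client, using the nested `index` object form from the body shapes shown earlier; the index name and replica count are illustrative assumptions, not values from this patch:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Equivalent to PUT /my-index/_settings with a nested `index` object;
  // the flat "index.number_of_replicas" and bare "number_of_replicas"
  // forms shown above produce the same result.
  await client.indices.putSettings({
    index: 'my-index',        // assumed index name
    preserve_existing: false, // set true to keep settings already defined
    settings: {
      index: { number_of_replicas: 1 }
    }
  })
}

run().catch(console.error)
```

Per the note above, sending `null` for a setting in the same request body reverts it to its default value.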
+   * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example: ``` { "number_of_replicas": 1 } ``` Or you can use an `index` setting object: ``` { "index": { "number_of_replicas": 1 } } ``` Or you can use dot notation: ``` { "index.number_of_replicas": 1 } ``` Or you can embed any of the aforementioned options in a `settings` object. For example: ``` { "settings": { "index": { "number_of_replicas": 1 } } } ``` NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-settings | Elasticsearch API documentation}
   */
  async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise
diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts
index 09a5c8813..4d2c76536 100644
--- a/src/api/api/inference.ts
+++ b/src/api/api/inference.ts
@@ -364,7 +364,7 @@ export default class Inference {
  }

  /**
-   * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities.
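A rough sketch of calling this endpoint from the client, assuming an inference endpoint named `my-chat` was created beforehand with the `chat_completion` task type on the `openai` or `elastic` service. The endpoint id and message text are illustrative, and the `chat_completion_request` body key follows the generated typings in this repository; check the request type for your client version if it differs:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // `my-chat` is an assumed endpoint id, not part of this patch.
  const response = await client.inference.chatCompletionUnified({
    inference_id: 'my-chat',
    chat_completion_request: {
      messages: [
        { role: 'user', content: 'Summarize the retriever framework in one sentence.' }
      ]
    }
  })
  // The task only supports streaming; `response` surfaces the
  // server-sent event stream rather than a parsed JSON body.
  console.log(response)
}

run().catch(console.error)
```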
The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API. + * Perform chat completion inference The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai` service or the `elastic` service, use the Chat completion inference API. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-unified-inference | Elasticsearch API documentation} */ async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/types.ts b/src/api/types.ts index f862f3489..7ecf44d42 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -3868,7 +3868,7 @@ export interface ErrorCauseKeys { /** The type of error */ type: string /** A human-readable explanation of the error, in English. */ - reason?: string + reason?: string | null /** The server stack trace. Present only if the `error_trace=true` parameter was sent with the request. */ stack_trace?: string caused_by?: ErrorCause @@ -4060,6 +4060,12 @@ export interface InlineGetKeys { export type InlineGet = InlineGetKeys & { [property: string]: any } +export interface InnerRetriever { + retriever: RetrieverContainer + weight: float + normalizer: ScoreNormalizer +} + export type Ip = string export interface KnnQuery extends QueryDslQueryBase { @@ -4135,6 +4141,12 @@ export type Level = 'cluster' | 'indices' | 'shards' export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' +export interface LinearRetriever extends RetrieverBase { + /** Inner retrievers. */ + retrievers?: InnerRetriever[] + rank_window_size: integer +} + export type MapboxVectorTiles = ArrayBuffer export interface MergesStats { @@ -4231,6 +4243,14 @@ export type Password = string export type Percentage = string | float +export interface PinnedRetriever extends RetrieverBase { + /** Inner retriever. */ + retriever: RetrieverContainer + ids?: string[] + docs?: SpecifiedDocument[] + rank_window_size: integer +} + export type PipelineName = string export interface PluginStats { @@ -4330,6 +4350,12 @@ export interface RescoreVector { oversample: float } +export interface RescorerRetriever extends RetrieverBase { + /** Inner retriever. */ + retriever: RetrieverContainer + rescore: SearchRescore | SearchRescore[] +} + export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' export interface Retries { @@ -4344,6 +4370,8 @@ export interface RetrieverBase { filter?: QueryDslQueryContainer | QueryDslQueryContainer[] /** Minimum _score for matching documents. Documents with a lower _score are not included in the top documents. */ min_score?: float + /** Retriever name. 
*/
+  _name?: string
 }

 export interface RetrieverContainer {
@@ -4357,6 +4385,13 @@ export interface RetrieverContainer {
   text_similarity_reranker?: TextSimilarityReranker
   /** A retriever that replaces the functionality of a rule query. */
   rule?: RuleRetriever
+  /** A retriever that re-scores only the results produced by its child retriever. */
+  rescorer?: RescorerRetriever
+  /** A retriever that supports the combination of different retrievers through a weighted linear combination. */
+  linear?: LinearRetriever
+  /** A pinned retriever applies pinned documents to the underlying retriever.
+   * This retriever will rewrite to a PinnedQueryBuilder. */
+  pinned?: PinnedRetriever
 }

 export type Routing = string
@@ -4370,7 +4405,7 @@ export interface RrfRank {

 export interface RuleRetriever extends RetrieverBase {
   /** The ruleset IDs containing the rules this retriever is evaluating against. */
-  ruleset_ids: Id[]
+  ruleset_ids: Id | Id[]
   /** The match criteria that will determine if a rule in the provided rulesets should be applied. */
   match_criteria: any
   /** The retriever whose results rules should be applied to. */
@@ -4381,6 +4416,8 @@ export type ScalarValue = long | double | string | boolean | null

+export type ScoreNormalizer = 'none' | 'minmax'
+
 export interface ScoreSort {
   order?: SortOrder
 }
@@ -4563,6 +4600,11 @@ export type SortOrder = 'asc' | 'desc'

 export type SortResults = FieldValue[]

+export interface SpecifiedDocument {
+  index?: IndexName
+  id: Id
+}
+
 export interface StandardRetriever extends RetrieverBase {
   /** Defines a query to retrieve a set of top documents. */
   query?: QueryDslQueryContainer
@@ -8673,7 +8715,7 @@ export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys

 export type QueryDslGeoExecution = 'memory' | 'indexed'

 export interface QueryDslGeoGridQuery extends QueryDslQueryBase {
-  geogrid?: GeoTile
+  geotile?: GeoTile
   geohash?: GeoHash
   geohex?: GeoHexCell
 }
@@ -8777,6 +8819,8 @@ export interface QueryDslIntervalsContainer {
   match?: QueryDslIntervalsMatch
   /** Matches terms that start with a specified set of characters. */
   prefix?: QueryDslIntervalsPrefix
+  range?: QueryDslIntervalsRange
+  regexp?: QueryDslIntervalsRegexp
   /** Matches terms using a wildcard pattern. */
   wildcard?: QueryDslIntervalsWildcard
 }
@@ -8857,10 +8901,38 @@ export interface QueryDslIntervalsQuery extends QueryDslQueryBase {
   match?: QueryDslIntervalsMatch
   /** Matches terms that start with a specified set of characters. */
   prefix?: QueryDslIntervalsPrefix
+  range?: QueryDslIntervalsRange
+  regexp?: QueryDslIntervalsRegexp
   /** Matches terms using a wildcard pattern. */
   wildcard?: QueryDslIntervalsWildcard
 }

+export interface QueryDslIntervalsRange {
+  /** Analyzer used to analyze the range terms. */
+  analyzer?: string
+  /** Lower term, either gte or gt must be provided. */
+  gte?: string
+  /** Lower term, either gte or gt must be provided. */
+  gt?: string
+  /** Upper term, either lte or lt must be provided. */
+  lte?: string
+  /** Upper term, either lte or lt must be provided. */
+  lt?: string
+  /** If specified, match intervals from this field rather than the top-level field.
+   * The range terms are normalized using the search analyzer from this field, unless `analyzer` is specified separately. */
+  use_field?: Field
+}
+
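The new `range` rule above and the `regexp` rule defined next both slot into the existing `intervals` query container. A sketch of combining them in a search, assuming an index `my-index` with a text field `body`; all names and patterns are illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Match intervals where a term matching the regexp "cold.*" is
  // followed, in order, by a term sorting between "a" and "m" inclusive.
  const result = await client.search({
    index: 'my-index',
    query: {
      intervals: {
        body: {
          all_of: {
            ordered: true,
            intervals: [
              { regexp: { pattern: 'cold.*' } },
              { range: { gte: 'a', lte: 'm' } }
            ]
          }
        }
      }
    }
  })
  console.log(result.hits.total)
}

run().catch(console.error)
```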
+export interface QueryDslIntervalsRegexp {
+  /** Analyzer used to analyze the `pattern`. */
+  analyzer?: string
+  /** Regex pattern. */
+  pattern: string
+  /** If specified, match intervals from this field rather than the top-level field.
+   * The `pattern` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */
+  use_field?: Field
+}
+
 export interface QueryDslIntervalsWildcard {
   /** Analyzer used to analyze the `pattern`.
    * Defaults to the top-level field's analyzer. */
@@ -9415,7 +9487,8 @@ export interface QueryDslRegexpQuery extends QueryDslQueryBase {

 export interface QueryDslRuleQuery extends QueryDslQueryBase {
   organic: QueryDslQueryContainer
-  ruleset_ids: Id[]
+  ruleset_ids?: Id | Id[]
+  ruleset_id?: string
   match_criteria: any
 }
@@ -22115,6 +22188,76 @@ export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceI
   task_type: InferenceTaskTypeAlibabaCloudAI
 }

+export interface InferenceInferenceEndpointInfoAmazonBedrock extends InferenceInferenceEndpoint {
+  /** The inference Id */
+  inference_id: string
+  /** The task type */
+  task_type: InferenceTaskTypeAmazonBedrock
+}
+
+export interface InferenceInferenceEndpointInfoAnthropic extends InferenceInferenceEndpoint {
+  /** The inference Id */
+  inference_id: string
+  /** The task type */
+  task_type: InferenceTaskTypeAnthropic
+}
+
+export interface InferenceInferenceEndpointInfoAzureAIStudio extends InferenceInferenceEndpoint {
+  /** The inference Id */
+  inference_id: string
+  /** The task type */
+  task_type: InferenceTaskTypeAzureAIStudio
+}
+
+export interface InferenceInferenceEndpointInfoAzureOpenAI extends InferenceInferenceEndpoint {
+  /** The inference Id */
+  inference_id: string
+  /** The task type */
+  task_type: InferenceTaskTypeAzureOpenAI
+}
+
+export interface InferenceInferenceEndpointInfoCohere extends InferenceInferenceEndpoint {
+  /** The inference Id */
+  inference_id: string
+  /** The task type */
+  task_type: InferenceTaskTypeCohere
+}
+
+export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint {
+  /** The inference Id */
+  inference_id: string
+  /** The task type */
+  task_type: InferenceTaskTypeELSER
+}
+
+export interface InferenceInferenceEndpointInfoElasticsearch extends InferenceInferenceEndpoint {
+  /** The inference Id */
+  inference_id: string
+  /** The task type */
+  task_type: InferenceTaskTypeElasticsearch
+}
+
+export interface InferenceInferenceEndpointInfoGoogleAIStudio extends InferenceInferenceEndpoint {
+  /** The inference Id */
+  inference_id: string
+  /** The task type */
+  task_type: InferenceTaskTypeGoogleAIStudio
+}
+
+export interface InferenceInferenceEndpointInfoGoogleVertexAI extends InferenceInferenceEndpoint {
+  /** The inference Id */
+  inference_id: string
+  /** The task type */
+  task_type: InferenceTaskTypeGoogleVertexAI
+}
+
+export interface InferenceInferenceEndpointInfoHuggingFace extends InferenceInferenceEndpoint {
+  /** The inference Id */
+  inference_id: string
+  /** The task type */
+  task_type: InferenceTaskTypeHuggingFace
+}
+
 export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInferenceEndpoint {
   /** The inference Id */
   inference_id: string
@@ -22122,6 +22265,34 @@ export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInference
   task_type: InferenceTaskTypeJinaAi
 }

+export interface InferenceInferenceEndpointInfoMistral extends InferenceInferenceEndpoint {
+  /** The inference Id */
+  inference_id: string
+  /** The task type */
+  task_type: InferenceTaskTypeMistral
+}
+
+export interface InferenceInferenceEndpointInfoOpenAI extends InferenceInferenceEndpoint {
+  /**
The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeOpenAI +} + +export interface InferenceInferenceEndpointInfoVoyageAI extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeVoyageAI +} + +export interface InferenceInferenceEndpointInfoWatsonx extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeWatsonx +} + export interface InferenceInferenceResult { text_embedding_bytes?: InferenceTextEmbeddingByteResult[] text_embedding_bits?: InferenceTextEmbeddingByteResult[] @@ -22306,8 +22477,36 @@ export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding' +export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion' + +export type InferenceTaskTypeAnthropic = 'completion' + +export type InferenceTaskTypeAzureAIStudio = 'text_embedding' | 'completion' + +export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion' + +export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion' + +export type InferenceTaskTypeELSER = 'sparse_embedding' + +export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embedding' | 'rerank' + +export type InferenceTaskTypeGoogleAIStudio = 'text_embedding' | 'completion' + +export type InferenceTaskTypeGoogleVertexAI = 'text_embedding' | 'rerank' + +export type InferenceTaskTypeHuggingFace = 'text_embedding' + export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank' +export type InferenceTaskTypeMistral = 'text_embedding' + +export type InferenceTaskTypeOpenAI = 'text_embedding' | 'chat_completion' | 'completion' + +export type InferenceTaskTypeVoyageAI = 'text_embedding' | 'rerank' + +export type InferenceTaskTypeWatsonx = 'text_embedding' + export interface InferenceTextEmbeddingByteResult { embedding: InferenceDenseByteVector } @@ -22554,7 +22753,7 @@ export interface InferencePutAmazonbedrockRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfo +export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock export interface InferencePutAnthropicRequest extends RequestBase { /** The task type. @@ -22577,7 +22776,7 @@ export interface InferencePutAnthropicRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfo +export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic export interface InferencePutAzureaistudioRequest extends RequestBase { /** The type of the inference task that the model will perform. 
*/ @@ -22599,7 +22798,7 @@ export interface InferencePutAzureaistudioRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfo +export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAzureAIStudio export interface InferencePutAzureopenaiRequest extends RequestBase { /** The type of the inference task that the model will perform. @@ -22622,7 +22821,7 @@ export interface InferencePutAzureopenaiRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfo +export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzureOpenAI export interface InferencePutCohereRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22645,7 +22844,7 @@ export interface InferencePutCohereRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutCohereResponse = InferenceInferenceEndpointInfo +export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere export interface InferencePutElasticsearchRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22668,7 +22867,7 @@ export interface InferencePutElasticsearchRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfo +export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoElasticsearch export interface InferencePutElserRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22687,7 +22886,7 @@ export interface InferencePutElserRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, elser_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } } -export type InferencePutElserResponse = InferenceInferenceEndpointInfo +export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER export interface InferencePutGoogleaistudioRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22706,7 +22905,7 @@ export interface InferencePutGoogleaistudioRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } } -export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfo +export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoGoogleAIStudio export interface InferencePutGooglevertexaiRequest extends RequestBase { /** The type of the inference task that the model will perform. 
*/ @@ -22728,7 +22927,7 @@ export interface InferencePutGooglevertexaiRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfo +export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoGoogleVertexAI export interface InferencePutHuggingFaceRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22747,7 +22946,7 @@ export interface InferencePutHuggingFaceRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } } -export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfo +export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace export interface InferencePutJinaaiRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22789,7 +22988,7 @@ export interface InferencePutMistralRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } } -export type InferencePutMistralResponse = InferenceInferenceEndpointInfo +export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral export interface InferencePutOpenaiRequest extends RequestBase { /** The type of the inference task that the model will perform. @@ -22812,7 +23011,7 @@ export interface InferencePutOpenaiRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfo +export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI export interface InferencePutVoyageaiRequest extends RequestBase { /** The type of the inference task that the model will perform. */ @@ -22834,7 +23033,7 @@ export interface InferencePutVoyageaiRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfo +export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI export interface InferencePutWatsonxRequest extends RequestBase { /** The task type. @@ -22852,7 +23051,7 @@ export interface InferencePutWatsonxRequest extends RequestBase { querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never } } -export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfo +export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx export interface InferenceRerankRequest extends RequestBase { /** The unique identifier for the inference endpoint. */
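A downstream effect of the per-service response types introduced in this patch: each `put*` helper now returns an endpoint info object whose `task_type` is narrowed to that service's task-type union instead of the generic one. A sketch with `putOpenai`; the endpoint id, model id, and API key handling are illustrative assumptions:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const endpoint = await client.inference.putOpenai({
    task_type: 'chat_completion',     // narrowed to InferenceTaskTypeOpenAI
    openai_inference_id: 'my-openai', // assumed endpoint id
    service: 'openai',
    service_settings: {
      api_key: process.env.OPENAI_API_KEY ?? '',
      model_id: 'gpt-4o'              // assumed model id
    }
  })
  // endpoint.task_type is 'text_embedding' | 'chat_completion' | 'completion'
  console.log(endpoint.inference_id, endpoint.task_type)
}

run().catch(console.error)
```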