Source code for genai.extensions.llama_index.embeddings
import asyncio
from typing import Optional

from pydantic import Field

from genai._types import ModelLike
from genai.client import Client
from genai.schema import TextEmbeddingParameters
from genai.text.embedding.embedding_service import CreateExecutionOptions

try:
    from llama_index.core.base.embeddings.base import BaseEmbedding, Embedding
except ImportError:
    raise ImportError("Could not import llamaindex: Please install ibm-generative-ai[llama-index] extension.")  # noqa: B904
[docs]
class IBMGenAILlamaIndexEmbedding(BaseEmbedding):
    client: Client
    model_id: str
    parameters: Optional[ModelLike[TextEmbeddingParameters]] = None
    execution_options: Optional[ModelLike[CreateExecutionOptions]] = None

    # Batch size is set to 10000 to avoid batching in
    # LlamaIndex as it is handled by the SDK itself
    embed_batch_size: int = Field(default=10000, description="The batch size for embedding calls.", gt=0)
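A minimal usage sketch of the class above. It assumes API credentials are provided via environment variables (the usual Credentials.from_env() pattern of the genai SDK) and uses an illustrative embedding model id; substitute whichever model is available in your deployment. The get_text_embedding call comes from LlamaIndex's BaseEmbedding interface.

from genai import Client, Credentials
from genai.extensions.llama_index.embeddings import IBMGenAILlamaIndexEmbedding

# Build the SDK client; Credentials.from_env() reads the API key/endpoint
# from the environment (assumption: they are already configured).
client = Client(credentials=Credentials.from_env())

embedding = IBMGenAILlamaIndexEmbedding(
    client=client,
    model_id="sentence-transformers/all-minilm-l6-v2",  # illustrative model id
)

# get_text_embedding is inherited from LlamaIndex's BaseEmbedding;
# batching across inputs is delegated to the SDK (see embed_batch_size above).
vector = embedding.get_text_embedding("What is a generative model?")
print(len(vector))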