genai.extensions.langchain package#

Extension for the LangChain library.

class genai.extensions.langchain.LangChainChatInterface[source]#

Bases: BaseChatModel

Class representing the LangChainChatInterface for interacting with the LangChain chat API.

Example:

from genai import Client, Credentials
from genai.extensions.langchain import LangChainChatInterface
from langchain_core.messages import HumanMessage, SystemMessage
from genai.schema import TextGenerationParameters

client = Client(credentials=Credentials.from_env())
llm = LangChainChatInterface(
    client=client,
    model_id="meta-llama/llama-3-70b-instruct",
    parameters=TextGenerationParameters(
        max_new_tokens=250,
    )
)

response = llm.generate(messages=[[
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Hello world!"),
]])
print(response)

client: Client#
conversation_id: str | None#
get_num_tokens(text)[source]#

Get the number of tokens present in the text.

Useful for checking if an input will fit in a model’s context window.

Parameters:

text (str) – The string input to tokenize.

Returns:

The integer number of tokens in the text.

Return type:

int
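
For example, a minimal sketch reusing the llm instance from the class example above:

prompt = "How far away is the Moon?"
print(llm.get_num_tokens(prompt))  # an int to compare against the model's context window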

get_num_tokens_from_messages(messages)[source]#

Get the number of tokens in the messages.

Useful for checking if an input will fit in a model’s context window.

Parameters:

messages (list[BaseMessage]) – The message inputs to tokenize.

Returns:

The sum of the number of tokens across the messages.

Return type:

int
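
A minimal sketch, assuming the llm instance and message imports from the class example above:

messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Hello world!"),
]
print(llm.get_num_tokens_from_messages(messages))  # total tokens across both messages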

get_token_ids(text)[source]#

Return the ordered ids of the tokens in a text.

Parameters:

text (str) – The string input to tokenize.

Returns:

A list of ids corresponding to the tokens in the text, in the order they occur in the text.

Return type:

list[int]
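
A minimal sketch, again assuming the llm instance from above; actual ids depend on the model's tokenizer:

ids = llm.get_token_ids("Hello world!")
print(ids)       # ordered token ids, e.g. [1, 22557, 3186, ...]
print(len(ids))  # consistent with get_num_tokens("Hello world!")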

classmethod is_lc_serializable()[source]#

Is this class serializable?

Return type:

bool

property lc_secrets: dict[str, str]#

A map of constructor argument names to secret ids.

For example,

{"openai_api_key": "OPENAI_API_KEY"}

classmethod load_from_file(file, *, client)[source]#

Parameters:

  • file (str | Path) –

  • client (Client) –
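
A usage sketch; the path "chat.yaml" is hypothetical and assumes a previously saved model configuration:

llm = LangChainChatInterface.load_from_file("chat.yaml", client=client)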

model_id: str#
moderations: ModerationParameters | None#
parameters: TextGenerationParameters | None#
parent_id: str | None#
prompt_id: str | None#
prompt_template_id: str | None#
streaming: bool | None#
trim_method: str | TrimMethod | None#
use_conversation_parameters: bool | None#
classmethod validate_data_models(value, values, config, field)[source]#

pydantic model genai.extensions.langchain.LangChainEmbeddingsInterface[source]#

Bases: BaseModel, Embeddings

Class representing the LangChainEmbeddingsInterface for generating embeddings with the LangChain embeddings API.

Example:

from genai import Client, Credentials
from genai.extensions.langchain import LangChainEmbeddingsInterface
from genai.text.embedding import TextEmbeddingParameters

client = Client(credentials=Credentials.from_env())
embeddings = LangChainEmbeddingsInterface(
    client=client,
    model_id="sentence-transformers/all-minilm-l6-v2",
    parameters=TextEmbeddingParameters(truncate_input_tokens=True)
)

embeddings.embed_query("Hello world!")
embeddings.embed_documents(["First document", "Second document"])

Config:
  • extra: str = forbid

  • protected_namespaces: tuple = ()

  • arbitrary_types_allowed: bool = True

field client: Client [Required]#
field execution_options: ModelLike[CreateExecutionOptions] | None = None#
field model_id: str [Required]#
field parameters: ModelLike[TextEmbeddingParameters] | None = None#
async aembed_documents(texts)[source]#

Asynchronously embed search documents.

Parameters:

texts (list[str]) –

Return type:

list[list[float]]

async aembed_query(text)[source]#

Asynchronously embed query text.

Parameters:

text (str) –

Return type:

list[float]
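
An async usage sketch, assuming the embeddings instance from the class example above:

import asyncio

async def main() -> None:
    doc_vectors = await embeddings.aembed_documents(["First document", "Second document"])
    query_vector = await embeddings.aembed_query("Hello world!")
    print(len(doc_vectors), len(query_vector))  # 2 document vectors; dimension of the query vector

asyncio.run(main())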

embed_documents(texts)[source]#

Embed search documents.

Parameters:

texts (list[str]) –

Return type:

list[list[float]]

embed_query(text)[source]#

Embed query text.

Parameters:

text (str) –

Return type:

list[float]

class genai.extensions.langchain.LangChainInterface[source]#

Bases: LLM

Class representing the LangChainInterface for interacting with the LangChain LLM API.

Example:

from genai import Client, Credentials
from genai.extensions.langchain import LangChainInterface
from genai.schema import TextGenerationParameters

client = Client(credentials=Credentials.from_env())
llm = LangChainInterface(
    client=client,
    model_id="meta-llama/llama-3-70b-instruct",
    parameters=TextGenerationParameters(max_new_tokens=50)
)

response = llm.generate(prompts=["Hello world!"])
print(response)

client: Client#
data: PromptTemplateData | None#
execution_options: CreateExecutionOptions | None#
get_num_tokens(text)[source]#

Get the number of tokens present in the text.

Useful for checking if an input will fit in a model’s context window.

Parameters:

text (str) – The string input to tokenize.

Returns:

The integer number of tokens in the text.

Return type:

int

get_num_tokens_from_messages(messages)[source]#

Get the number of tokens in the messages.

Useful for checking if an input will fit in a model’s context window.

Parameters:

messages (list[BaseMessage]) – The message inputs to tokenize.

Returns:

The sum of the number of tokens across the messages.

Return type:

int

get_token_ids(text)[source]#

Return the ordered ids of the tokens in a text.

Parameters:

text (str) – The string input to tokenize.

Returns:

A list of ids corresponding to the tokens in the text, in the order they occur in the text.

Return type:

list[int]

classmethod is_lc_serializable()[source]#

Is this class serializable?

Return type:

bool

property lc_secrets: dict[str, str]#

A map of constructor argument names to secret ids.

For example,

{"openai_api_key": "OPENAI_API_KEY"}

classmethod load_from_file(file, *, client)[source]#

Parameters:

  • file (str | Path) –

  • client (Client) –

model_id: str#
moderations: ModerationParameters | None#
parameters: TextGenerationParameters | None#
prompt_id: str | None#
streaming: bool | None#

genai.extensions.langchain.from_langchain_template(template)[source]#

Convert LangChain template variables to Mustache template variables.

Parameters:

template (str) –

Return type:

str

genai.extensions.langchain.to_langchain_template(template)[source]#

Convert Mustache template variables to LangChain template variables.

Parameters:

template (str) –

Return type:

str
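
A round-trip sketch; the expected outputs are illustrative, based on LangChain's {variable} style and Mustache's {{variable}} style:

from genai.extensions.langchain import from_langchain_template, to_langchain_template

mustache = from_langchain_template("Hello {name}!")  # expected: "Hello {{name}}!"
langchain = to_langchain_template(mustache)          # expected: "Hello {name}!"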

Submodules#