src.prompt_builders.watsonx.llama.llama_prompt_builder.WatsonXLlamaPromptBuilder

Bases: BasePromptBuilder

Prompt builder for the Llama model architecture.

This class handles the construction of prompts and chat messages specifically formatted for Llama models, with support for tool definitions and context-aware message formatting.

Attributes:

    config (Dict): Configuration loaded from prompt_builders.yaml.
    template (Template): Jinja2 template for text generation prompts.

Example
builder = WatsonXLlamaPromptBuilder()

# Build chat messages
output = await builder.build_chat(PromptPayload(
    conversation_history=history,
    tool_definitions=tools
))

# Build text prompt
output = await builder.build_text(PromptPayload(
    conversation_history=history,
    tool_definitions=tools
))
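
In the example above, history is a list of chat messages and tools is a list of Tool definitions. A sketch of how they might be assembled, assuming the SystemMessage/UserMessage models used in the source below (the weather tool is a hypothetical stand-in; the builder only reads its function.name, function.description, and function.parameters):

history = [
    SystemMessage(content="You are a helpful assistant."),
    UserMessage(content="What's the weather in Paris?"),
]

tools = [weather_tool]  # hypothetical Tool with function.name == "get_weather"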
Source code in src/prompt_builders/watsonx/llama/llama_prompt_builder.py
class WatsonXLlamaPromptBuilder(BasePromptBuilder):
    """Prompt builder for the Llama model architecture.

    This class handles the construction of prompts and chat messages specifically
    formatted for Llama models, with support for tool definitions and
    context-aware message formatting.

    Attributes:
        config (Dict): Configuration loaded from prompt_builders.yaml.
        template (Template): Jinja2 template for text generation prompts.

    Example:
        ```python
        builder = WatsonXLlamaPromptBuilder()

        # Build chat messages
        output = await builder.build_chat(PromptPayload(
            conversation_history=history,
            tool_definitions=tools
        ))

        # Build text prompt
        output = await builder.build_text(PromptPayload(
            conversation_history=history,
            tool_definitions=tools
        ))
        ```
    """
    def __init__(self, template_dir: Optional[str] = None):
        """Initialize the Llama prompt builder.

        Args:
            template_dir (Optional[str]): Custom directory for template files.
                If None, uses the default directory 'src/prompt_builders/watsonx/llama'.
        """
        super().__init__()
        self.config = self._load_config()
        self.template = self._load_template(template_dir) if template_dir else self._load_template()

    async def build_chat(self, payload: PromptPayload) -> PromptBuilderOutput:
        """Build chat messages with tools embedded in system message.

        Creates or modifies the system message to include tool definitions
        while preserving the existing conversation structure.

        Args:
            payload (PromptPayload): The structured input containing conversation history,
                                   tool definitions, and other context-specific data

        Returns:
            PromptBuilderOutput: Contains the modified message list with tool information
        """
        conversation_history = payload.conversation_history
        tool_definitions = payload.tool_definitions or []

        if not tool_definitions:
            return PromptBuilderOutput(chat_messages=conversation_history)

        tool_names = [tool.function.name for tool in tool_definitions]
        tool_section_header = self.config['system_prompt']['header'].format(
            tools=", ".join(tool_names),
            date=datetime.now().strftime('%Y-%m-%d')
        )
        tool_instructions = self.config['system_prompt']['tool_instructions']
        tool_info = await self._build_system_content(tool_definitions, tool_section_header, tool_instructions)
        modified_history = conversation_history.copy()

        if conversation_history and isinstance(conversation_history[0], SystemMessage):
            existing_content = conversation_history[0].content
            modified_history[0] = SystemMessage(content=f"{tool_info}\n{existing_content}")
        else:
            system_msg = SystemMessage(content=tool_info)
            modified_history.insert(0, system_msg)

        return PromptBuilderOutput(chat_messages=modified_history)

    async def build_text(self, payload: PromptPayload) -> PromptBuilderOutput:
        """Build text prompt using Llama-specific template.

        Constructs a complete prompt string using the Jinja2 template,
        incorporating conversation history, tool definitions, and model-specific
        tokens.

        Args:
            payload (PromptPayload): The structured input containing conversation history,
                                   tool definitions, and other context-specific data

        Returns:
            PromptBuilderOutput: Contains the formatted text prompt for generation
        """
        conversation_history = payload.conversation_history
        tool_definitions = payload.tool_definitions or []

        # Preprocess conversation history to flatten user message content
        processed_history = [self._preprocess_message(msg).model_dump() for msg in conversation_history]

        # Format tool definitions for template
        formatted_tools = [
            self._format_tool_for_template(tool)
            for tool in tool_definitions
        ] if tool_definitions else None

        # Prepare template variables
        template_vars = {
            'messages': processed_history,
            'tools': formatted_tools,
            'tools_in_user_message': False,
            'add_generation_prompt': True,
            'date_string': datetime.now().strftime("%d %b %Y"),
            'bos_token': self.config['tokens']['begin_text'],
            'tool_instructions': self.config['system_prompt']['tool_instructions']
        }

        return PromptBuilderOutput(text_prompt=self.template.render(**template_vars))

    @staticmethod
    def _preprocess_message(message: TextChatMessage) -> TextChatMessage:
        """Preprocess message for Llama format."""
        if not isinstance(message, UserMessage):
            return message

        if isinstance(message.content, list):
            # Extract text content from array of content objects
            text_contents = []
            for content in message.content:
                if getattr(content, 'type', None) == 'text':
                    text_contents.append(content.text)

            # Create new UserMessage with flattened content
            return UserMessage(content=" ".join(text_contents))

        return message

    @staticmethod
    def _load_config() -> dict:
        """Load Llama-specific configuration from YAML file."""
        config_path = Path("src/configs/prompt_builders.yaml")
        with config_path.open() as f:
            config = yaml.safe_load(f)
            return config.get('watsonx-llama')

    @staticmethod
    def _load_template(template_dir: str = "src/prompt_builders/watsonx/llama") -> Template:
        """Load Jinja2 template for Llama prompt generation."""
        template_path = Path(template_dir) / "llama-3.3-70b.jinja"
        with open(template_path) as f:
            return Template(f.read())

    @staticmethod
    def _format_tool_for_template(tool: Tool) -> dict:
        """Format tool definition for Llama template usage."""
        return {
            "name": tool.function.name,
            "description": tool.function.description,
            "parameters": {
                "type": "object",
                "properties": tool.function.parameters.properties,
                "required": tool.function.parameters.required
            }
        }
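
The builder reads its settings from the 'watsonx-llama' section of src/configs/prompt_builders.yaml. A minimal sketch of that section, with placeholder wording that is an assumption; only the key structure is implied by the code above (system_prompt.header is formatted with {tools} and {date}, system_prompt.tool_instructions is passed through verbatim, and tokens.begin_text becomes the template's bos_token):

# Hypothetical values; only the keys are implied by the code.
watsonx-llama:
  system_prompt:
    # Formatted with tools=<comma-separated tool names> and date=<YYYY-MM-DD>
    header: "You have access to the following tools: {tools}. Today's date: {date}."
    tool_instructions: "To use a tool, respond with a JSON object giving the tool name and arguments."
  tokens:
    # Passed to the Jinja2 template as bos_token
    begin_text: "<|begin_of_text|>"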

__init__(template_dir=None)

Initialize the Llama prompt builder.

Parameters:

    template_dir (Optional[str], default None): Custom directory for template files. If None, uses the default directory 'src/prompt_builders/watsonx/llama'.
Source code in src/prompt_builders/watsonx/llama/llama_prompt_builder.py
def __init__(self, template_dir: Optional[str] = None):
    """Initialize the Llama prompt builder.

    Args:
        template_dir (Optional[str]): Custom directory for template files.
            If None, uses the default directory 'src/prompt_builders/watsonx/llama'.
    """
    super().__init__()
    self.config = self._load_config()
    self.template = self._load_template(template_dir) if template_dir else self._load_template()
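
For example (the custom directory is a hypothetical path; per _load_template below, it must contain a llama-3.3-70b.jinja file):

from src.prompt_builders.watsonx.llama.llama_prompt_builder import WatsonXLlamaPromptBuilder

# Default: template loaded from src/prompt_builders/watsonx/llama
builder = WatsonXLlamaPromptBuilder()

# Override with a custom template directory (hypothetical path)
builder = WatsonXLlamaPromptBuilder(template_dir="my_templates/llama")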

build_chat(payload) async

Build chat messages with tools embedded in system message.

Creates or modifies the system message to include tool definitions while preserving the existing conversation structure.

Parameters:

    payload (PromptPayload, required): The structured input containing conversation history, tool definitions, and other context-specific data.

Returns:

    PromptBuilderOutput: Contains the modified message list with tool information.

Source code in src/prompt_builders/watsonx/llama/llama_prompt_builder.py
async def build_chat(self, payload: PromptPayload) -> PromptBuilderOutput:
    """Build chat messages with tools embedded in system message.

    Creates or modifies the system message to include tool definitions
    while preserving the existing conversation structure.

    Args:
        payload (PromptPayload): The structured input containing conversation history,
                               tool definitions, and other context-specific data

    Returns:
        PromptBuilderOutput: Contains the modified message list with tool information
    """
    conversation_history = payload.conversation_history
    tool_definitions = payload.tool_definitions or []

    if not tool_definitions:
        return PromptBuilderOutput(chat_messages=conversation_history)

    tool_names = [tool.function.name for tool in tool_definitions]
    tool_section_header = self.config['system_prompt']['header'].format(
        tools=", ".join(tool_names),
        date=datetime.now().strftime('%Y-%m-%d')
    )
    tool_instructions = self.config['system_prompt']['tool_instructions']
    tool_info = await self._build_system_content(tool_definitions, tool_section_header, tool_instructions)
    modified_history = conversation_history.copy()

    if conversation_history and isinstance(conversation_history[0], SystemMessage):
        existing_content = conversation_history[0].content
        modified_history[0] = SystemMessage(content=f"{tool_info}\n{existing_content}")
    else:
        system_msg = SystemMessage(content=tool_info)
        modified_history.insert(0, system_msg)

    return PromptBuilderOutput(chat_messages=modified_history)
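
A minimal usage sketch, reusing the hypothetical history and weather_tool from the class example (the PromptPayload keyword names mirror the attribute access in the source above):

import asyncio

payload = PromptPayload(
    conversation_history=history,
    tool_definitions=[weather_tool],
)
output = asyncio.run(builder.build_chat(payload))

# history starts with a SystemMessage, so the tool section is merged into it
# rather than inserted as a new message: the message count is unchanged.
assert isinstance(output.chat_messages[0], SystemMessage)
assert len(output.chat_messages) == len(history)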

build_text(payload) async

Build text prompt using Llama-specific template.

Constructs a complete prompt string using the Jinja2 template, incorporating conversation history, tool definitions, and model-specific tokens.

Parameters:

    payload (PromptPayload, required): The structured input containing conversation history, tool definitions, and other context-specific data.

Returns:

    PromptBuilderOutput: Contains the formatted text prompt for generation.

Source code in src/prompt_builders/watsonx/llama/llama_prompt_builder.py
async def build_text(self, payload: PromptPayload) -> PromptBuilderOutput:
    """Build text prompt using Llama-specific template.

    Constructs a complete prompt string using the Jinja2 template,
    incorporating conversation history, tool definitions, and model-specific
    tokens.

    Args:
        payload (PromptPayload): The structured input containing conversation history,
                               tool definitions, and other context-specific data

    Returns:
        PromptBuilderOutput: Contains the formatted text prompt for generation
    """
    conversation_history = payload.conversation_history
    tool_definitions = payload.tool_definitions or []

    # Preprocess conversation history to flatten user message content
    processed_history = [self._preprocess_message(msg).model_dump() for msg in conversation_history]

    # Format tool definitions for template
    formatted_tools = [
        self._format_tool_for_template(tool)
        for tool in tool_definitions
    ] if tool_definitions else None

    # Prepare template variables
    template_vars = {
        'messages': processed_history,
        'tools': formatted_tools,
        'tools_in_user_message': False,
        'add_generation_prompt': True,
        'date_string': datetime.now().strftime("%d %b %Y"),
        'bos_token': self.config['tokens']['begin_text'],
        'tool_instructions': self.config['system_prompt']['tool_instructions']
    }

    return PromptBuilderOutput(text_prompt=self.template.render(**template_vars))
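
A usage sketch with the same hypothetical payload as in build_chat; note that list-valued user content is flattened to plain text by _preprocess_message before rendering:

output = asyncio.run(builder.build_text(payload))

# A single prompt string rendered by the Jinja2 template: it begins with the
# configured bos_token and, because add_generation_prompt=True, ends ready
# for the model to generate the assistant turn.
prompt = output.text_prompt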