@@ -73,6 +73,7 @@ class MyLLMOutput(LLMOutput):
7373 "LLMConfig" ,
7474 "LLMErrorOutput" ,
7575 "LLMOutput" ,
76+ "LLMReasoningOutput" ,
7677 "create_error_result" ,
7778 "create_llm_check_fn" ,
7879]
@@ -87,6 +88,9 @@ class LLMConfig(BaseModel):
         model (str): The LLM model to use for checking the text.
         confidence_threshold (float): Minimum confidence required to trigger the guardrail,
             as a float between 0.0 and 1.0.
+        include_reasoning (bool): Whether to include a reasoning/explanation field in the
+            guardrail output. Useful for development and debugging; disabled by default
+            to save tokens. Defaults to False.
9094 """
9195
9296 model : str = Field (..., description = "LLM model to use for checking the text" )
@@ -96,6 +100,13 @@ class LLMConfig(BaseModel):
         ge=0.0,
         le=1.0,
     )
+    include_reasoning: bool = Field(
+        False,
+        description=(
+            "Include reasoning/explanation fields in output. "
+            "Defaults to False for token efficiency. Enable for development/debugging."
+        ),
+    )

     model_config = ConfigDict(extra="forbid")

@@ -117,6 +128,19 @@ class LLMOutput(BaseModel):
     confidence: float


+class LLMReasoningOutput(LLMOutput):
+    """Extended LLM output schema with reasoning explanation.
+
+    Extends LLMOutput to include a reason field explaining the decision.
+    This is the standard extended output for guardrails that include reasoning.
+
+    Attributes:
+        reason (str): Explanation for why the input was flagged or not flagged.
+    """
+
+    reason: str = Field(..., description="Explanation for the flagging decision")
+
+
 class LLMErrorOutput(LLMOutput):
     """Extended LLM output schema with error information.

@@ -399,7 +423,7 @@ def create_llm_check_fn(
     name: str,
     description: str,
     system_prompt: str,
-    output_model: type[LLMOutput] = LLMOutput,
+    output_model: type[LLMOutput] | None = None,
     config_model: type[TLLMCfg] = LLMConfig,  # type: ignore[assignment]
 ) -> CheckFn[GuardrailLLMContextProto, str, TLLMCfg]:
     """Factory for constructing and registering an LLM-based guardrail check_fn.
@@ -409,17 +433,25 @@ def create_llm_check_fn(
     use the configured LLM to analyze text, validate the result, and trigger if
     confidence exceeds the provided threshold.

+    When `include_reasoning=True` in the config, the guardrail will automatically
+    use an extended output model with a `reason` field. When `include_reasoning=False`,
+    it uses the base `LLMOutput` model (only `flagged` and `confidence` fields).
+
     Args:
         name (str): Name under which to register the guardrail.
         description (str): Short explanation of the guardrail's logic.
         system_prompt (str): Prompt passed to the LLM to control analysis.
-        output_model (type[LLMOutput]): Schema for parsing the LLM output.
+        output_model (type[LLMOutput] | None): Custom schema for parsing the LLM output.
+            If None (default), uses `LLMReasoningOutput` when reasoning is enabled.
+            Provide a custom model only if you need additional fields beyond `reason`.
         config_model (type[LLMConfig]): Configuration schema for the check_fn.

     Returns:
         CheckFn[GuardrailLLMContextProto, str, TLLMCfg]: Async check function
             to be registered as a guardrail.
     """
+    # Default to LLMReasoningOutput if no custom model provided
+    extended_output_model = output_model or LLMReasoningOutput

     async def guardrail_func(
         ctx: GuardrailLLMContextProto,
@@ -441,12 +473,16 @@ async def guardrail_func(
         else:
             rendered_system_prompt = system_prompt

+        # Use base LLMOutput if reasoning is disabled, otherwise use the extended model
+        include_reasoning = getattr(config, "include_reasoning", False)
+        selected_output_model = extended_output_model if include_reasoning else LLMOutput
+
         analysis, token_usage = await run_llm(
             data,
             rendered_system_prompt,
             ctx.guardrail_llm,
             config.model,
-            output_model,
+            selected_output_model,
         )

         # Check if this is an error result
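
Taken together, the change makes reasoning output an opt-in config flag rather than a property of the output model passed to the factory. A minimal usage sketch of the new behavior follows; the import path, guardrail name, system prompt, and model name are illustrative assumptions, not taken from this commit:

# Illustrative sketch only: import path, guardrail name, and model name are assumptions.
from guardrails.checks.text.llm_base import LLMConfig, create_llm_check_fn

off_topic_check = create_llm_check_fn(
    name="Off Topic",
    description="Flags text unrelated to the assistant's configured scope.",
    system_prompt="Decide whether the user text is off topic for a banking assistant.",
    # output_model omitted: LLMReasoningOutput is used when reasoning is enabled.
)

# Development: request a `reason` field alongside `flagged` and `confidence`.
dev_config = LLMConfig(model="gpt-4.1-mini", confidence_threshold=0.7, include_reasoning=True)

# Production: keep the base LLMOutput fields only, saving output tokens.
prod_config = LLMConfig(model="gpt-4.1-mini", confidence_threshold=0.7, include_reasoning=False)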