@@ -519,3 +519,50 @@ async def fake_run_llm(
519519
520520 # Should pass empty list when no conversation history
521521 assert captured_args ["conversation_history" ] == [] # noqa: S101
522+
523+
@pytest.mark.asyncio
async def test_run_llm_strips_whitespace_in_single_turn_mode() -> None:
    """run_llm should strip whitespace from input in single-turn mode."""
    fake_client = _FakeAsyncClientCapture('{"flagged": false, "confidence": 0.1}')
    padded_text = " Test input with whitespace \n "

    await run_llm(
        text=padded_text,
        system_prompt="Analyze.",
        client=fake_client,  # type: ignore[arg-type]
        model="gpt-test",
        output_model=LLMOutput,
        conversation_history=None,
        max_turns=10,
    )

    # Single-turn mode embeds the input under a "# Text" heading; the text
    # must appear stripped, with no leading whitespace surviving.
    sent_content = fake_client.captured_messages[1]["content"]
    assert "# Text\n\nTest input with whitespace" in sent_content  # noqa: S101
    assert " Test input" not in sent_content  # noqa: S101
543+
544+
@pytest.mark.asyncio
async def test_run_llm_strips_whitespace_in_multi_turn_mode() -> None:
    """run_llm should strip whitespace from input in multi-turn mode.

    In multi-turn mode the user message carries a JSON payload; the
    whitespace-padded input must appear stripped under ``latest_input``.
    """
    # Local import keeps the test file's top-level imports untouched;
    # placed at function top rather than mid-body per PEP 8.
    import json

    client = _FakeAsyncClientCapture('{"flagged": false, "confidence": 0.1}')
    conversation_history = [
        {"role": "user", "content": "Previous message"},
    ]

    await run_llm(
        text=" Test input with whitespace \n ",
        system_prompt="Analyze.",
        client=client,  # type: ignore[arg-type]
        model="gpt-test",
        output_model=LLMOutput,
        conversation_history=conversation_history,
        max_turns=10,
    )

    # Should strip whitespace in multi-turn mode
    user_message = client.captured_messages[1]["content"]
    json_start = user_message.find("{")
    # Fail clearly if no JSON payload is present, instead of letting
    # json.loads choke on a nonsense slice when find() returns -1.
    assert json_start != -1, "no JSON payload found in user message"  # noqa: S101
    payload = json.loads(user_message[json_start:])
    assert payload["latest_input"] == "Test input with whitespace"  # noqa: S101
0 commit comments