@@ -572,6 +572,38 @@ async def test_events_with_empty_content_are_skipped():
572572 role = "user" ,
573573 ),
574574 ),
575+ # Event with content that has executable code part
576+ Event (
577+ invocation_id = "inv10" ,
578+ author = "test_agent" ,
579+ content = types .Content (
580+ parts = [
581+ types .Part (
582+ executable_code = types .ExecutableCode (
583+ code = "print('hello')" ,
584+ language = "PYTHON" ,
585+ )
586+ )
587+ ],
588+ role = "model" ,
589+ ),
590+ ),
591+ # Event with content that has code execution result part
592+ Event (
593+ invocation_id = "inv11" ,
594+ author = "test_agent" ,
595+ content = types .Content (
596+ parts = [
597+ types .Part (
598+ code_execution_result = types .CodeExecutionResult (
599+ outcome = "OUTCOME_OK" ,
600+ output = "hello" ,
601+ )
602+ )
603+ ],
604+ role = "model" ,
605+ ),
606+ ),
575607 ]
576608 invocation_context .session .events = events
577609
@@ -608,4 +640,153 @@ async def test_events_with_empty_content_are_skipped():
608640 parts = [types .Part (text = "" ), types .Part (text = "Mixed content" )],
609641 role = "user" ,
610642 ),
643+ types .Content (
644+ parts = [
645+ types .Part (
646+ executable_code = types .ExecutableCode (
647+ code = "print('hello')" ,
648+ language = "PYTHON" ,
649+ )
650+ )
651+ ],
652+ role = "model" ,
653+ ),
654+ types .Content (
655+ parts = [
656+ types .Part (
657+ code_execution_result = types .CodeExecutionResult (
658+ outcome = "OUTCOME_OK" ,
659+ output = "hello" ,
660+ )
661+ )
662+ ],
663+ role = "model" ,
664+ ),
611665 ]
666+
667+
@pytest.mark.asyncio
async def test_code_execution_result_events_are_not_skipped():
  """Test that events with code execution result are not skipped.

  This is a regression test for the endless loop bug where code executor
  outputs were not passed to the LLM because the events were incorrectly
  filtered as empty.
  """
  agent = Agent(model="gemini-2.5-flash", name="test_agent")
  llm_request = LlmRequest(model="gemini-2.5-flash")
  invocation_context = await testing_utils.create_invocation_context(
      agent=agent
  )

  # Three-turn history: user prompt, model-generated code, execution result.
  user_event = Event(
      invocation_id="inv1",
      author="user",
      content=types.UserContent("Write code to calculate factorial"),
  )
  code_event = Event(
      invocation_id="inv2",
      author="test_agent",
      content=types.Content(
          parts=[
              types.Part(text="Here's the code:"),
              types.Part(
                  executable_code=types.ExecutableCode(
                      code=(
                          "def factorial(n):\n return 1 if n <= 1 else n *"
                          " factorial(n-1)\n print(factorial(5))"
                      ),
                      language="PYTHON",
                  )
              ),
          ],
          role="model",
      ),
  )
  result_event = Event(
      invocation_id="inv3",
      author="test_agent",
      content=types.Content(
          parts=[
              types.Part(
                  code_execution_result=types.CodeExecutionResult(
                      outcome="OUTCOME_OK",
                      output="120",
                  )
              )
          ],
          role="model",
      ),
  )
  invocation_context.session.events = [user_event, code_event, result_event]

  # Run the contents request processor over the session history.
  async for _ in contents.request_processor.run_async(
      invocation_context, llm_request
  ):
    pass

  # Every event must be forwarded, especially the code execution result.
  assert len(llm_request.contents) == 3
  assert llm_request.contents[0] == types.UserContent(
      "Write code to calculate factorial"
  )
  # The second event carries the executable code part.
  assert llm_request.contents[1].parts[1].executable_code is not None
  # The third event carries the code execution result - this was the bug!
  assert llm_request.contents[2].parts[0].code_execution_result is not None
  assert llm_request.contents[2].parts[0].code_execution_result.output == "120"
743+
744+
@pytest.mark.asyncio
async def test_code_execution_result_not_in_first_part_is_not_skipped():
  """Test that code execution results aren't skipped.

  This covers results that appear in a non-first part.
  """
  agent = Agent(model="gemini-2.5-flash", name="test_agent")
  llm_request = LlmRequest(model="gemini-2.5-flash")
  invocation_context = await testing_utils.create_invocation_context(
      agent=agent
  )

  # The result part is deliberately preceded by an empty text part, so any
  # filter that only inspects the first part would wrongly drop this event.
  result_part = types.Part(
      code_execution_result=types.CodeExecutionResult(
          outcome="OUTCOME_OK",
          output="42",
      )
  )
  invocation_context.session.events = [
      Event(
          invocation_id="inv1",
          author="user",
          content=types.UserContent("Run some code."),
      ),
      Event(
          invocation_id="inv2",
          author="test_agent",
          content=types.Content(
              parts=[types.Part(text=""), result_part],
              role="model",
          ),
      ),
  ]

  async for _ in contents.request_processor.run_async(
      invocation_context, llm_request
  ):
    pass

  assert len(llm_request.contents) == 2
  # The execution result must survive even though it is not in parts[0].
  matching_parts = [
      part
      for part in llm_request.contents[1].parts
      if part.code_execution_result is not None
      and part.code_execution_result.output == "42"
  ]
  assert matching_parts
0 commit comments