Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions strix/agents/base_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -333,6 +333,13 @@ async def _initialize_sandbox_and_state(self, task: str) -> None:
sandbox_mode = os.getenv("STRIX_SANDBOX_MODE", "false").lower() == "true"
if not sandbox_mode and self.state.sandbox_id is None:
from strix.runtime import get_runtime
from strix.telemetry.tracer import get_global_tracer

tracer = get_global_tracer()
if tracer:
tracer.update_agent_system_message(
self.state.agent_id, "Setting up sandbox environment..."
)

try:
runtime = get_runtime()
Expand Down Expand Up @@ -367,6 +374,9 @@ async def _initialize_sandbox_and_state(self, task: str) -> None:
async def _process_iteration(self, tracer: Optional["Tracer"]) -> bool | None:
final_response = None

if tracer:
tracer.update_agent_system_message(self.state.agent_id, "Thinking...")

async for response in self.llm.generate(self.state.get_conversation_history()):
final_response = response
if tracer and response.content:
Expand Down Expand Up @@ -408,8 +418,19 @@ async def _process_iteration(self, tracer: Optional["Tracer"]) -> bool | None:
)

if actions:
if tracer:
tool_names = [a.get("toolName") or a.get("tool_name") or "tool" for a in actions]
display_names = tool_names[:2]
overflow = len(tool_names) - 2
suffix = f" +{overflow} more" if overflow > 0 else ""
tracer.update_agent_system_message(
self.state.agent_id, f"Executing {', '.join(display_names)}{suffix}..."
)
return await self._execute_actions(actions, tracer)

if tracer:
tracer.update_agent_system_message(self.state.agent_id, "Processing response...")

return None

async def _execute_actions(self, actions: list[Any], tracer: Optional["Tracer"]) -> bool:
Expand Down
3 changes: 2 additions & 1 deletion strix/interface/tool_components/thinking_renderer.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,8 @@ def render(cls, tool_data: dict[str, Any]) -> Static:
text.append("\n ")

if thought:
text.append(thought, style="italic dim")
indented_thought = "\n ".join(thought.split("\n"))
Copy link

Copilot AI Mar 21, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

thought.split("\n") won’t handle Windows newlines (\r\n) cleanly and can leave stray \r characters in the output. Using thought.splitlines() would normalize newline handling and match patterns used elsewhere in the interface renderers.

Suggested change
indented_thought = "\n ".join(thought.split("\n"))
indented_thought = "\n ".join(thought.splitlines())

Copilot uses AI. Check for mistakes.
text.append(indented_thought, style="italic dim")
else:
text.append("Thinking...", style="italic dim")

Expand Down
42 changes: 36 additions & 6 deletions strix/interface/tui.py
Original file line number Diff line number Diff line change
Expand Up @@ -1238,14 +1238,19 @@ def keymap_styled(keys: list[tuple[str, str]]) -> Text:
return (Text(" "), keymap, False)

if status == "running":
sys_msg = agent_data.get("system_message", "")
if self._agent_has_real_activity(agent_id):
animated_text = Text()
animated_text.append_text(self._get_sweep_animation(self._sweep_colors))
if sys_msg:
animated_text.append(sys_msg, style="dim italic")
animated_text.append(" ", style="dim")
animated_text.append("esc", style="white")
animated_text.append(" ", style="dim")
animated_text.append("stop", style="dim")
return (animated_text, keymap_styled([("ctrl-q", "quit")]), True)
animated_text = self._get_animated_verb_text(agent_id, "Initializing")
msg = sys_msg or "Initializing..."
animated_text = self._get_animated_verb_text(agent_id, msg)
return (animated_text, keymap_styled([("ctrl-q", "quit")]), True)

return (None, Text(), False)
Expand Down Expand Up @@ -1678,21 +1683,46 @@ def _render_chat_content(self, msg_data: dict[str, Any]) -> Any:
content = msg_data.get("content", "")
metadata = msg_data.get("metadata", {})

if not content:
return None

if role == "user":
if not content:
return None
return UserMessageRenderer.render_simple(content)

renderables = []

if "thinking_blocks" in metadata and metadata["thinking_blocks"]:
from strix.interface.tool_components.thinking_renderer import ThinkRenderer

for block in metadata["thinking_blocks"]:
Comment on lines +1693 to +1696
Copy link

Copilot AI Mar 21, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This renderer expects thinking_blocks under msg_data["metadata"], but Tracer.log_chat_message(...) is typically called without metadata for normal assistant responses (e.g., BaseAgent logs assistant messages without attaching thinking_blocks). That means these blocks likely won’t render in practice. Consider propagating thinking_blocks into tracer chat message metadata when logging assistant messages, or adjust the TUI to also read thinking_blocks from the message root if that’s where they’re stored elsewhere.

Suggested change
if "thinking_blocks" in metadata and metadata["thinking_blocks"]:
from strix.interface.tool_components.thinking_renderer import ThinkRenderer
for block in metadata["thinking_blocks"]:
# Prefer thinking_blocks from metadata, but fall back to root-level key
thinking_blocks = metadata.get("thinking_blocks") or msg_data.get("thinking_blocks")
if thinking_blocks:
from strix.interface.tool_components.thinking_renderer import ThinkRenderer
for block in thinking_blocks:

Copilot uses AI. Check for mistakes.
thought = block.get("thinking", "")
if thought:
renderables.append(
ThinkRenderer.render({"args": {"thought": thought}}).renderable
)

if not content and not renderables:
return None

if metadata.get("interrupted"):
streaming_result = self._render_streaming_content(content)
interrupted_text = Text()
interrupted_text.append("\n")
interrupted_text.append("⚠ ", style="yellow")
interrupted_text.append("Interrupted by user", style="yellow dim")
return self._merge_renderables([streaming_result, interrupted_text])
return self._merge_renderables([*renderables, streaming_result, interrupted_text])

if content:
msg_renderable = AgentMessageRenderer.render_simple(content)
renderables.append(msg_renderable)

return AgentMessageRenderer.render_simple(content)
if not renderables:
return None

if len(renderables) == 1:
r = renderables[0]
return self._sanitize_text(r) if isinstance(r, Text) else r

return self._merge_renderables(renderables)

def _render_tool_content_simple(self, tool_data: dict[str, Any]) -> Any:
tool_name = tool_data.get("tool_name", "Unknown Tool")
Expand Down
21 changes: 19 additions & 2 deletions strix/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,12 +141,21 @@ def set_agent_identity(self, agent_name: str | None, agent_id: str | None) -> No
async def generate(
self, conversation_history: list[dict[str, Any]]
) -> AsyncIterator[LLMResponse]:
from strix.telemetry.tracer import get_global_tracer

tracer = get_global_tracer()
if tracer and self.agent_id:
tracer.update_agent_system_message(self.agent_id, "Compressing memory...")

messages = self._prepare_messages(conversation_history)
max_retries = int(Config.get("strix_llm_max_retries") or "5")

for attempt in range(max_retries + 1):
try:
async for response in self._stream(messages):
if tracer and self.agent_id:
tracer.update_agent_system_message(self.agent_id, "Waiting for LLM provider...")

async for response in self._stream(messages, tracer):
yield response
return # noqa: TRY300
except Exception as e: # noqa: BLE001
Expand All @@ -155,15 +164,23 @@ async def generate(
wait = min(10, 2 * (2**attempt))
await asyncio.sleep(wait)

async def _stream(self, messages: list[dict[str, Any]]) -> AsyncIterator[LLMResponse]:
async def _stream(
self, messages: list[dict[str, Any]], tracer: Any = None
) -> AsyncIterator[LLMResponse]:
accumulated = ""
chunks: list[Any] = []
done_streaming = 0
first_chunk_received = False

self._total_stats.requests += 1
response = await acompletion(**self._build_completion_args(messages), stream=True)

async for chunk in response:
if not first_chunk_received:
first_chunk_received = True
if tracer and self.agent_id:
tracer.update_agent_system_message(self.agent_id, "Generating response...")

chunks.append(chunk)
if done_streaming:
done_streaming += 1
Expand Down
7 changes: 7 additions & 0 deletions strix/telemetry/tracer.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
_OTEL_BOOTSTRAPPED = False
_OTEL_REMOTE_ENABLED = False


def get_global_tracer() -> Optional["Tracer"]:
    """Return the module-level Tracer instance, or None if none is installed.

    Callers are expected to guard on the result (``if tracer: ...``) before
    emitting telemetry, as seen at the call sites in ``base_agent`` and ``llm``.
    """
    return _global_tracer

Expand Down Expand Up @@ -437,6 +438,7 @@ def log_agent_creation(
"name": name,
"task": task,
"status": "running",
"system_message": "",
"parent_id": parent_id,
"created_at": datetime.now(UTC).isoformat(),
"updated_at": datetime.now(UTC).isoformat(),
Expand Down Expand Up @@ -585,6 +587,11 @@ def update_agent_status(
source="strix.agents",
)

def update_agent_system_message(self, agent_id: str, message: str) -> None:
if agent_id in self.agents:
self.agents[agent_id]["system_message"] = message
self.agents[agent_id]["updated_at"] = datetime.now(UTC).isoformat()

def set_scan_config(self, config: dict[str, Any]) -> None:
self.scan_config = config
self.run_metadata.update(
Expand Down