-
Notifications
You must be signed in to change notification settings - Fork 2.6k
feat: OWASP WSTG methodology alignment & TUI live status #328
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
9f0c625
a54ba27
4b72fc0
8c5d946
c56631e
0439d70
8f02d52
6c02017
8859f2b
8abbb58
e5b0464
bf6ea9c
4a3cc13
64aa3b5
24b5147
76fcf75
650ec46
e7e03e0
5be1025
82bbc11
ff30eee
7c7940b
dc23c1f
a567677
19631e2
877af2b
6592a6f
4785d4b
88ffb3c
62bdf09
25f8bd7
1fc997d
2f6c1ed
e9f43c3
a913f76
95e2f88
9dcb302
2bc2522
1236065
ce2353a
b15d3d6
9573242
cfb8b35
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -97,3 +97,4 @@ Thumbs.db | |
| schema.graphql | ||
|
|
||
| .opencode/ | ||
| /test_run.sh | ||
Large diffs are not rendered by default.
| Original file line number | Diff line number | Diff line change | ||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -1036,13 +1036,39 @@ def _merge_renderables(renderables: list[Any]) -> Text: | |||||||||||||
| if i > 0: | ||||||||||||||
| combined.append("\n") | ||||||||||||||
| StrixTUIApp._append_renderable(combined, item) | ||||||||||||||
| return combined | ||||||||||||||
| return StrixTUIApp._sanitize_text_spans(combined) | ||||||||||||||
|
|
||||||||||||||
| @staticmethod | ||||||||||||||
| def _sanitize_text_spans(text: Text) -> Text: | ||||||||||||||
| plain = text.plain | ||||||||||||||
| plain_len = len(plain) | ||||||||||||||
|
|
||||||||||||||
| if plain_len == 0 or not text.spans: | ||||||||||||||
| return text | ||||||||||||||
|
|
||||||||||||||
| sanitized = Text( | ||||||||||||||
| plain, | ||||||||||||||
| style=text.style, | ||||||||||||||
| justify=text.justify, | ||||||||||||||
| overflow=text.overflow, | ||||||||||||||
| no_wrap=text.no_wrap, | ||||||||||||||
| end=text.end, | ||||||||||||||
| tab_size=text.tab_size, | ||||||||||||||
| ) | ||||||||||||||
|
|
||||||||||||||
| for span in text.spans: | ||||||||||||||
| start = max(0, min(span.start, plain_len)) | ||||||||||||||
| end = max(0, min(span.end, plain_len)) | ||||||||||||||
| if end > start: | ||||||||||||||
| sanitized.stylize(span.style, start, end) | ||||||||||||||
|
|
||||||||||||||
| return sanitized | ||||||||||||||
|
|
||||||||||||||
| @staticmethod | ||||||||||||||
| def _append_renderable(combined: Text, item: Any) -> None: | ||||||||||||||
| """Recursively append a renderable's text content to a combined Text.""" | ||||||||||||||
| if isinstance(item, Text): | ||||||||||||||
| combined.append_text(item) | ||||||||||||||
| combined.append_text(StrixTUIApp._sanitize_text_spans(item)) | ||||||||||||||
| elif isinstance(item, Group): | ||||||||||||||
| for j, sub in enumerate(item.renderables): | ||||||||||||||
| if j > 0: | ||||||||||||||
|
|
@@ -1087,7 +1113,7 @@ def _get_rendered_events_content(self, events: list[dict[str, Any]]) -> Any: | |||||||||||||
| return Text() | ||||||||||||||
|
|
||||||||||||||
| if len(renderables) == 1 and isinstance(renderables[0], Text): | ||||||||||||||
| return renderables[0] | ||||||||||||||
| return self._sanitize_text_spans(renderables[0]) | ||||||||||||||
|
|
||||||||||||||
| return self._merge_renderables(renderables) | ||||||||||||||
|
|
||||||||||||||
|
|
@@ -1123,7 +1149,7 @@ def _render_streaming_content(self, content: str, agent_id: str | None = None) - | |||||||||||||
| if not renderables: | ||||||||||||||
| result = Text() | ||||||||||||||
| elif len(renderables) == 1 and isinstance(renderables[0], Text): | ||||||||||||||
| result = renderables[0] | ||||||||||||||
| result = self._sanitize_text_spans(renderables[0]) | ||||||||||||||
| else: | ||||||||||||||
| result = self._merge_renderables(renderables) | ||||||||||||||
|
|
||||||||||||||
|
|
@@ -1215,14 +1241,19 @@ def keymap_styled(keys: list[tuple[str, str]]) -> Text: | |||||||||||||
| return (Text(" "), keymap, False) | ||||||||||||||
|
|
||||||||||||||
| if status == "running": | ||||||||||||||
| sys_msg = agent_data.get("system_message", "") | ||||||||||||||
| if self._agent_has_real_activity(agent_id): | ||||||||||||||
| animated_text = Text() | ||||||||||||||
| animated_text.append_text(self._get_sweep_animation(self._sweep_colors)) | ||||||||||||||
| if sys_msg: | ||||||||||||||
| animated_text.append(sys_msg, style="dim italic") | ||||||||||||||
| animated_text.append(" ", style="dim") | ||||||||||||||
| animated_text.append("esc", style="white") | ||||||||||||||
| animated_text.append(" ", style="dim") | ||||||||||||||
| animated_text.append("stop", style="dim") | ||||||||||||||
| return (animated_text, keymap_styled([("ctrl-q", "quit")]), True) | ||||||||||||||
| animated_text = self._get_animated_verb_text(agent_id, "Initializing") | ||||||||||||||
| msg = sys_msg or "Initializing..." | ||||||||||||||
| animated_text = self._get_animated_verb_text(agent_id, msg) | ||||||||||||||
| return (animated_text, keymap_styled([("ctrl-q", "quit")]), True) | ||||||||||||||
|
|
||||||||||||||
| return (None, Text(), False) | ||||||||||||||
|
|
@@ -1394,7 +1425,7 @@ def _animate_dots(self) -> None: | |||||||||||||
| if not has_active_agents: | ||||||||||||||
| has_active_agents = any( | ||||||||||||||
| agent_data.get("status", "running") in ["running", "waiting"] | ||||||||||||||
| for agent_data in self.tracer.agents.values() | ||||||||||||||
| for agent_data in list(self.tracer.agents.values()) | ||||||||||||||
| ) | ||||||||||||||
|
|
||||||||||||||
| if not has_active_agents: | ||||||||||||||
|
|
@@ -1655,12 +1686,26 @@ def _render_chat_content(self, msg_data: dict[str, Any]) -> Any: | |||||||||||||
| content = msg_data.get("content", "") | ||||||||||||||
| metadata = msg_data.get("metadata", {}) | ||||||||||||||
|
|
||||||||||||||
| if not content: | ||||||||||||||
| return None | ||||||||||||||
|
|
||||||||||||||
| if role == "user": | ||||||||||||||
| if not content: | ||||||||||||||
| return None | ||||||||||||||
| return UserMessageRenderer.render_simple(content) | ||||||||||||||
|
Comment on lines 1689 to 1692
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

**Empty user `content` bypasses `None` guard**

Before this change the function started with:

```python
if not content:
    return None
```

That check ran before the `role` branch, so user messages with empty content returned `None` safely. Now the user branch fires first and immediately calls `UserMessageRenderer.render_simple(content)` without verifying that `content` is non-empty. The assistant branch keeps the guard (`if not content and not renderables: return None`), so the asymmetry is inconsistent.
Suggested change
Prompt To Fix With AIThis is a comment left during a code review.
Path: strix/interface/tui.py
Line: 1689-1690
Comment:
**Empty user `content` bypasses `None` guard**
Before this change the function started with:
```python
if not content:
return None
```
That check ran before the `role` branch, so user messages with empty content returned `None` safely.
Now the user branch fires *first* and immediately calls `UserMessageRenderer.render_simple(content)` without verifying that `content` is non-empty. If a user-role message arrives with `content == ""` (e.g. a synthetic message injected by `process_tool_invocations` before its content is set, or any future code path that appends an empty user turn), `render_simple` is called with an empty string and likely returns a blank widget entry in the chat log instead of `None`.
The assistant branch keeps the guard (`if not content and not renderables: return None`), so the asymmetry is inconsistent. A minimal fix:
```suggestion
if role == "user":
if not content:
return None
return UserMessageRenderer.render_simple(content)
```
How can I resolve this? If you propose a fix, please make it concise. |
||||||||||||||
|
|
||||||||||||||
| renderables = [] | ||||||||||||||
|
|
||||||||||||||
| if "thinking_blocks" in metadata and metadata["thinking_blocks"]: | ||||||||||||||
| from strix.interface.tool_components.thinking_renderer import ThinkRenderer | ||||||||||||||
|
|
||||||||||||||
| for block in metadata["thinking_blocks"]: | ||||||||||||||
| thought = block.get("thinking", "") | ||||||||||||||
| if thought: | ||||||||||||||
| renderables.append( | ||||||||||||||
| ThinkRenderer.render({"args": {"thought": thought}}).renderable | ||||||||||||||
| ) | ||||||||||||||
|
|
||||||||||||||
| if not content and not renderables: | ||||||||||||||
| return None | ||||||||||||||
|
|
||||||||||||||
| if metadata.get("interrupted"): | ||||||||||||||
| streaming_result = self._render_streaming_content(content) | ||||||||||||||
| interrupted_text = Text() | ||||||||||||||
|
|
@@ -1669,7 +1714,18 @@ def _render_chat_content(self, msg_data: dict[str, Any]) -> Any: | |||||||||||||
| interrupted_text.append("Interrupted by user", style="yellow dim") | ||||||||||||||
| return self._merge_renderables([streaming_result, interrupted_text]) | ||||||||||||||
|
|
||||||||||||||
| return AgentMessageRenderer.render_simple(content) | ||||||||||||||
| if content: | ||||||||||||||
| msg_renderable = AgentMessageRenderer.render_simple(content) | ||||||||||||||
| renderables.append(msg_renderable) | ||||||||||||||
|
|
||||||||||||||
| if not renderables: | ||||||||||||||
| return None | ||||||||||||||
|
|
||||||||||||||
| if len(renderables) == 1: | ||||||||||||||
| r = renderables[0] | ||||||||||||||
| return self._sanitize_text_spans(r) if isinstance(r, Text) else r | ||||||||||||||
|
|
||||||||||||||
| return self._merge_renderables(renderables) | ||||||||||||||
|
|
||||||||||||||
| def _render_tool_content_simple(self, tool_data: dict[str, Any]) -> Any: | ||||||||||||||
| tool_name = tool_data.get("tool_name", "Unknown Tool") | ||||||||||||||
|
|
||||||||||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Corrective message injection has no retry cap
Every time the LLM produces a plain-text response with no tool calls,
`corrective_message` is injected as a `user` turn into `self.state.messages` and the iteration returns `False` (loop continues). There is no guard limiting how many times this can happen per run. If a model consistently produces plain-text (e.g., due to a prompt formatting mismatch or a model that ignores tool-call instructions), every failed iteration appends another ~150-token user message to the conversation history. Over the lifetime of an agent with a high max-iteration budget this can consume a significant portion of the context window with repetitive corrective content, crowding out actual task history and compounding the existing memory growth concern. Consider tracking a per-agent retry counter and triggering a harder recovery (e.g., `agent_finish` with an error, or raising `LLMRequestFailedError`) after N consecutive plain-text responses.

Reset `_no_tool_call_streak` to `0` at the top of `_process_iteration` whenever `actions` is non-empty.

Prompt To Fix With AI