Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
136 changes: 136 additions & 0 deletions core/llm/openaiTypeConverters.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -643,6 +643,142 @@ describe("openaiTypeConverters", () => {
});
});

describe("orphaned fc_ ID stripping (context compaction)", () => {
  it("should strip fc_ ID from function_call when reasoning was pruned from context", () => {
    // When compileChatMessages drops a thinking message to fit the context
    // window, the following assistant message still carries the fc_ item ID
    // that pointed at the (now absent) reasoning item.
    const chatHistory: ChatMessage[] = [
      {
        role: "user",
        content: "Hello",
      },
      // The thinking message was pruned — intentionally absent here.
      {
        role: "assistant",
        content: "",
        toolCalls: [
          {
            id: "call_001",
            type: "function",
            function: { name: "read_file", arguments: '{"path":"a.txt"}' },
          },
        ],
        metadata: {
          // fc_ ID is orphaned: its reasoning item is gone
          responsesOutputItemIds: ["fc_001"],
        },
      } as ChatMessage,
      {
        role: "tool",
        content: "file contents",
        toolCallId: "call_001",
      } as ChatMessage,
    ];

    const converted = toResponsesInput(chatHistory);

    // The function_call survives the conversion, but its fc_ ID must be dropped.
    const calls = getFunctionCalls(converted);
    expect(calls.length).toBe(1);
    expect(calls[0].id).toBeUndefined();
    expect(calls[0].call_id).toBe("call_001");

    // The matching function_call_output must remain untouched.
    const callOutputs = getFunctionCallOutputs(converted);
    expect(callOutputs.length).toBe(1);
    expect(callOutputs[0].call_id).toBe("call_001");
  });

  it("should keep fc_ ID when reasoning is present before function_call", () => {
    // Control case: with the reasoning item intact, the fc_ ID must survive.
    const chatHistory: ChatMessage[] = [
      {
        role: "thinking",
        content: "",
        reasoning_details: [
          { type: "reasoning_id", id: "rs_001" },
          {
            type: "encrypted_content",
            encrypted_content: "encrypted_data",
          },
        ],
        metadata: { reasoningId: "rs_001", encrypted_content: "encrypted_data" },
      } as ChatMessage,
      {
        role: "assistant",
        content: "",
        toolCalls: [
          {
            id: "call_001",
            type: "function",
            function: { name: "read_file", arguments: '{"path":"a.txt"}' },
          },
        ],
        metadata: {
          responsesOutputItemIds: ["fc_001"],
        },
      } as ChatMessage,
    ];

    const converted = toResponsesInput(chatHistory);

    expect(getReasoningItems(converted).length).toBe(1);

    const calls = getFunctionCalls(converted);
    expect(calls.length).toBe(1);
    // ID preserved because a kept reasoning item precedes the call
    expect(calls[0].id).toBe("fc_001");
  });

  it("should strip fc_ IDs from multiple pruned function_calls", () => {
    const chatHistory: ChatMessage[] = [
      {
        role: "user",
        content: "Use two tools",
      },
      // The thinking message was pruned from context.
      {
        role: "assistant",
        content: "",
        toolCalls: [
          {
            id: "call_001",
            type: "function",
            function: { name: "tool_a", arguments: "{}" },
          },
          {
            id: "call_002",
            type: "function",
            function: { name: "tool_b", arguments: "{}" },
          },
        ],
        metadata: {
          responsesOutputItemIds: ["fc_001", "fc_002"],
        },
      } as ChatMessage,
      {
        role: "tool",
        content: "result_a",
        toolCallId: "call_001",
      } as ChatMessage,
      {
        role: "tool",
        content: "result_b",
        toolCallId: "call_002",
      } as ChatMessage,
    ];

    const converted = toResponsesInput(chatHistory);

    const calls = getFunctionCalls(converted);
    expect(calls.length).toBe(2);
    // Every orphaned fc_ ID is dropped; call_ids remain for output pairing.
    expect(calls[0].id).toBeUndefined();
    expect(calls[1].id).toBeUndefined();
    expect(calls[0].call_id).toBe("call_001");
    expect(calls[1].call_id).toBe("call_002");
  });
});

describe("orphaned function_call_output removal", () => {
it("should remove function_call_output with no matching function_call", () => {
// This can happen when conversation history is truncated/pruned
Expand Down
42 changes: 42 additions & 0 deletions core/llm/openaiTypeConverters.ts
Original file line number Diff line number Diff line change
Expand Up @@ -922,6 +922,7 @@ function isValidSuccessor(item: ResponseInputItem | undefined): boolean {
* - Removes reasoning without encrypted_content; strips id from subsequent items
* - Removes reasoning not followed by function_call or message
* - Removes orphaned function_call_output with no matching function_call
* - Strips fc_ IDs from function_calls whose reasoning was pruned from context
*/
function sanitizeResponsesInput(input: ResponseInput): ResponseInput {
const skipIndices = new Set<number>();
Expand Down Expand Up @@ -953,6 +954,47 @@ function sanitizeResponsesInput(input: ResponseInput): ResponseInput {
}
}

// Second pass: strip fc_ IDs from function_calls that have no preceding
// (kept) reasoning item in the same turn. This handles the case where a
// thinking message was pruned from context during compileChatMessages —
// the assistant message still carries fc_ IDs that reference the now-absent
// reasoning item, which causes a Responses API 400 error.
for (let i = 0; i < input.length; i++) {
if (skipIndices.has(i) || stripIdIndices.has(i)) continue;

const item = input[i];
if (!isItemType<ResponseFunctionToolCall>(item, "function_call")) continue;

const fc = item as ResponseFunctionToolCall;
if (!fc.id?.startsWith("fc_")) continue;

// Scan backward within the same turn (reasoning + function_call block).
// Stop at any item that isn't a reasoning or function_call — that signals
// a turn boundary (user message, function_call_output, etc.).
let foundReasoning = false;
for (let j = i - 1; j >= 0; j--) {
// Skip items that will be removed (they don't count as valid reasoning)
if (skipIndices.has(j)) continue;

const prev = input[j];
if (isItemType<ResponseReasoningItem>(prev, "reasoning")) {
foundReasoning = true;
break;
} else if (isItemType<ResponseFunctionToolCall>(prev, "function_call")) {
// Another function_call in the same block — keep looking backward
continue;
} else {
// Any other item (user/assistant message, function_call_output, etc.)
// means we crossed a turn boundary without finding a reasoning item
break;
}
}

if (!foundReasoning) {
stripIdIndices.add(i);
}
}

const result: ResponseInput = [];
for (let i = 0; i < input.length; i++) {
if (skipIndices.has(i)) continue;
Expand Down
2 changes: 1 addition & 1 deletion gui/src/redux/selectors/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ export const selectSlashCommandComboBoxInputs = createSelector(
description: cmd.description,
type: "slashCommand" as ComboBoxItemType,
content: content,
source: cmd.source,
slashCommandSource: cmd.source,
} as ComboBoxItem;
}) || []
);
Expand Down
3 changes: 2 additions & 1 deletion gui/src/redux/thunks/edit.ts
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ export const streamEditThunk = createAsyncThunk<
streamThunkWrapper(async () => {
dispatch(setActive());

const state = getState();
const { selectedContextItems, content } = await resolveEditorContent({
editorState,
modifiers: {
Expand All @@ -48,7 +49,7 @@ export const streamEditThunk = createAsyncThunk<
},
ideMessenger: extra.ideMessenger,
defaultContextProviders: [],
availableSlashCommands: [],
availableSlashCommands: state.config.config.slashCommands ?? [],
dispatch,
getState,
});
Expand Down
Loading