diff --git a/CHANGELOG.md b/CHANGELOG.md
index ee82d4ff..26537f47 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,8 @@
## Unreleased
+- (OpenAI Chat) - Configurable reasoning history via `reasoningHistory` (model-level, default: all)
+
## 0.94.1
- Fix tools prompt override not working via config.
diff --git a/docs/configuration.md b/docs/configuration.md
index c84eb018..c9ad8708 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -693,7 +693,8 @@ To configure, add your OTLP collector config via `:otlp` map following [otlp aut
thinkTagEnd?: string;
models: {[key: string]: {
modelName?: string;
- extraPayload?: {[key: string]: any}
+ extraPayload?: {[key: string]: any};
+ reasoningHistory?: "all" | "turn" | "off";
}};
}};
defaultModel?: string;
diff --git a/docs/models.md b/docs/models.md
index cc8da224..c30d153b 100644
--- a/docs/models.md
+++ b/docs/models.md
@@ -61,19 +61,20 @@ You just need to add your provider to `providers` and make sure add the required
Schema:
-| Option | Type | Description | Required |
-|-------------------------------|--------|--------------------------------------------------------------------------------------------------------------|----------|
-| `api` | string | The API schema to use (`"openai-responses"`, `"openai-chat"`, or `"anthropic"`) | Yes |
-| `url` | string | API URL (with support for env like `${env:MY_URL}`) | No* |
-| `key` | string | API key (with support for `${env:MY_KEY}` or `{netrc:api.my-provider.com}` | No* |
-| `completionUrlRelativePath` | string | Optional override for the completion endpoint path (see defaults below and examples like Azure) | No |
-| `thinkTagStart` | string | Optional override the think start tag tag for openai-chat (Default: "") api | No |
-| `thinkTagEnd` | string | Optional override the think end tag for openai-chat (Default: "") api | No |
-| `httpClient` | map | Allow customize the http-client for this provider requests, like changing http version | No |
-| `models` | map | Key: model name, value: its config | Yes |
-| `models extraPayload` | map | Extra payload sent in body to LLM | No |
-| `models modelName` | string | Override model name, useful to have multiple models with different configs and names that use same LLM model | No |
-| `fetchModels` | boolean | Enable automatic model discovery from `/models` endpoint (OpenAI-compatible providers) | No |
+| Option | Type | Description | Required |
+|---------------------------------------|---------|--------------------------------------------------------------------------------------------------------------|----------|
+| `api` | string | The API schema to use (`"openai-responses"`, `"openai-chat"`, or `"anthropic"`) | Yes |
+| `url` | string | API URL (with support for env like `${env:MY_URL}`) | No* |
+| `key`                                 | string  | API key (with support for `${env:MY_KEY}` or `{netrc:api.my-provider.com}`)                                  | No*      |
+| `completionUrlRelativePath` | string | Optional override for the completion endpoint path (see defaults below and examples like Azure) | No |
+| `thinkTagStart`                       | string  | Optional override for the think start tag for the openai-chat api (Default: `<think>`)                       | No       |
+| `thinkTagEnd`                         | string  | Optional override for the think end tag for the openai-chat api (Default: `</think>`)                        | No       |
+| `httpClient` | map | Allow customize the http-client for this provider requests, like changing http version | No |
+| `models` | map | Key: model name, value: its config | Yes |
+| `models extraPayload` | map | Extra payload sent in body to LLM | No |
+| `models modelName` | string | Override model name, useful to have multiple models with different configs and names that use same LLM model | No |
+| `models reasoningHistory` | string | Controls reasoning in conversation history: `"all"` (default), `"turn"`, or `"off"` | No |
+| `fetchModels` | boolean | Enable automatic model discovery from `/models` endpoint (OpenAI-compatible providers) | No |
_* url and key will be searched as envs `_API_URL` and `_API_KEY`, they require the env to be found or config to work._
@@ -120,6 +121,19 @@ Examples:
This way both will use gpt-5 model but one will override the reasoning to be high instead of the default.
+=== "Reasoning in conversation history"
+ `reasoningHistory` - Controls whether and how the model's reasoning (thinking blocks, reasoning_content) is included in conversation history sent to the model.
+
+ **Available modes:**
+
+ - **`"all"`** (default) - Send all reasoning blocks back to the model, so it can see its full chain of thought from previous turns. This is the safest option.
+ - **`"turn"`** - Send only reasoning from the current conversation turn (after the last user message). Previous reasoning is discarded before sending to the API.
+ - **`"off"`** - Never send reasoning blocks to the model. All reasoning is discarded before API calls.
+
+ **Note:** Reasoning is always shown to you in the UI and stored in chat history—this setting only controls what gets sent to the model in API requests.
+
+ Default: `"all"`.
+
=== "Dynamic model discovery"
For OpenAI-compatible providers, set `fetchModels: true` to automatically discover available models:
@@ -211,7 +225,7 @@ Notes:
3. Type the chosen method
4. Authenticate in your browser, copy the code.
5. Paste and send the code and done!
-
+
=== "Codex / Openai"
1. Login to Openai via the chat command `/login`.
diff --git a/src/eca/config.clj b/src/eca/config.clj
index 6b0b494c..5fdb7d9f 100644
--- a/src/eca/config.clj
+++ b/src/eca/config.clj
@@ -336,7 +336,8 @@
{:kebab-case-key
[[:providers]]
:keywordize-val
- [[:providers :ANY :httpClient]]
+ [[:providers :ANY :httpClient]
+ [:providers :ANY :models :ANY :reasoningHistory]]
:stringfy-key
[[:behavior]
[:providers]
diff --git a/src/eca/llm_api.clj b/src/eca/llm_api.clj
index 96536a8d..7e865ce2 100644
--- a/src/eca/llm_api.clj
+++ b/src/eca/llm_api.clj
@@ -97,6 +97,7 @@
provider-config (get-in config [:providers provider])
model-config (get-in provider-config [:models model])
extra-payload (:extraPayload model-config)
+ reasoning-history (or (:reasoningHistory model-config) :all)
[auth-type api-key] (llm-util/provider-api-key provider provider-auth config)
api-url (llm-util/provider-api-url provider config)
{:keys [handler]} (provider->api-handler provider config)
@@ -123,6 +124,7 @@
:web-search web-search
:extra-payload (merge {:parallel_tool_calls true}
extra-payload)
+ :reasoning-history reasoning-history
:api-url api-url
:api-key api-key
:auth-type auth-type}
@@ -157,6 +159,7 @@
:tools tools
:extra-payload (merge {:parallel_tool_calls true}
extra-payload)
+ :reasoning-history reasoning-history
:api-url api-url
:api-key api-key
:extra-headers {"openai-intent" "conversation-panel"
@@ -179,6 +182,7 @@
:tools tools
:think-tag-start ""
:think-tag-end ""
+ :reasoning-history reasoning-history
:extra-payload (merge {:parallel_tool_calls false}
(when reason?
{:extra_body {:google {:thinking_config {:include_thoughts true}}}})
@@ -221,6 +225,7 @@
:url-relative-path url-relative-path
:think-tag-start think-tag-start
:think-tag-end think-tag-end
+ :reasoning-history reasoning-history
:http-client http-client
:api-url api-url
:api-key api-key}
diff --git a/src/eca/llm_providers/openai_chat.clj b/src/eca/llm_providers/openai_chat.clj
index a6cfb164..4fa1af56 100644
--- a/src/eca/llm_providers/openai_chat.clj
+++ b/src/eca/llm_providers/openai_chat.clj
@@ -384,18 +384,26 @@
(reset! reasoning-state* {:id nil :type nil :content "" :buffer ""})))
(defn ^:private prune-history
- "Ensure DeepSeek-style reasoning_content is discarded from history but kept for the active turn.
- Only drops 'reason' messages WITH :delta-reasoning? before the last user message.
- Think-tag based reasoning (without :delta-reasoning?) is preserved and transformed to assistant messages."
- [messages]
- (if-let [last-user-idx (llm-util/find-last-user-msg-idx messages)]
- (->> messages
- (keep-indexed (fn [i m]
- (when-not (and (= "reason" (:role m))
- (get-in m [:content :delta-reasoning?])
- (< i last-user-idx))
- m)))
- vec)
+ "Discard reasoning messages from history based on reasoning-history mode.
+
+ Parameters:
+ - messages: the conversation history
+ - reasoning-history: controls reasoning retention
+ - :all - preserve all reasoning in history (safe default)
+ - :turn - preserve reasoning only in the current turn (after last user message)
+ - :off - discard all reasoning messages"
+ [messages reasoning-history]
+ (case reasoning-history
+ :all messages
+ :off (filterv #(not= "reason" (:role %)) messages)
+ :turn (if-let [last-user-idx (llm-util/find-last-user-msg-idx messages)]
+ (->> messages
+ (keep-indexed (fn [i m]
+ (when-not (and (= "reason" (:role m))
+ (< i last-user-idx))
+ m)))
+ vec)
+ messages)
messages))
(defn chat-completion!
@@ -406,14 +414,14 @@
Compatible with OpenRouter and other OpenAI-compatible providers."
[{:keys [model user-messages instructions temperature api-key api-url url-relative-path
past-messages tools extra-payload extra-headers supports-image?
- think-tag-start think-tag-end http-client]}
+ think-tag-start think-tag-end reasoning-history http-client]}
{:keys [on-message-received on-error on-prepare-tool-call on-tools-called on-reason on-usage-updated] :as callbacks}]
(let [think-tag-start (or think-tag-start "")
think-tag-end (or think-tag-end "")
stream? (boolean callbacks)
system-messages (when instructions [{:role "system" :content instructions}])
;; Pipeline: prune history -> normalize -> merge adjacent assistants -> filter
- all-messages (prune-history (vec (concat past-messages user-messages)))
+ all-messages (prune-history (vec (concat past-messages user-messages)) reasoning-history)
messages (vec (concat
system-messages
(normalize-messages all-messages supports-image? think-tag-start think-tag-end)))
@@ -473,7 +481,7 @@
tool-calls))
on-tools-called-wrapper (fn on-tools-called-wrapper [tools-to-call on-tools-called handle-response]
(when-let [{:keys [new-messages]} (on-tools-called tools-to-call)]
- (let [pruned-messages (prune-history new-messages)
+ (let [pruned-messages (prune-history new-messages reasoning-history)
new-messages-list (vec (concat
system-messages
(normalize-messages pruned-messages supports-image? think-tag-start think-tag-end)))
diff --git a/test/eca/llm_providers/openai_chat_test.clj b/test/eca/llm_providers/openai_chat_test.clj
index 67c9edf8..1051b6db 100644
--- a/test/eca/llm_providers/openai_chat_test.clj
+++ b/test/eca/llm_providers/openai_chat_test.clj
@@ -259,7 +259,7 @@
{:role "assistant" :reasoning_content "Thinking..."}])))))
(deftest prune-history-test
- (testing "Drops reason messages WITH :delta-reasoning? before the last user message (DeepSeek)"
+ (testing "reasoningHistory \"turn\" drops all reason messages before the last user message"
(is (match?
[{:role "user" :content "Q1"}
{:role "assistant" :content "A1"}
@@ -272,12 +272,12 @@
{:role "assistant" :content "A1"}
{:role "user" :content "Q2"}
{:role "reason" :content {:text "r2" :delta-reasoning? true}}
- {:role "assistant" :content "A2"}]))))
+ {:role "assistant" :content "A2"}]
+ :turn))))
- (testing "Preserves reason messages WITHOUT :delta-reasoning? (think-tag based)"
+ (testing "reasoningHistory \"turn\" also drops think-tag reasoning before last user message"
(is (match?
[{:role "user" :content "Q1"}
- {:role "reason" :content {:text "thinking..."}}
{:role "assistant" :content "A1"}
{:role "user" :content "Q2"}
{:role "reason" :content {:text "more thinking..."}}
@@ -288,12 +288,53 @@
{:role "assistant" :content "A1"}
{:role "user" :content "Q2"}
{:role "reason" :content {:text "more thinking..."}}
- {:role "assistant" :content "A2"}]))))
+ {:role "assistant" :content "A2"}]
+ :turn))))
- (testing "No user message leaves list unchanged"
+ (testing "reasoningHistory \"all\" preserves all reasoning"
+ (is (match?
+ [{:role "user" :content "Q1"}
+ {:role "reason" :content {:text "r1"}}
+ {:role "assistant" :content "A1"}
+ {:role "user" :content "Q2"}
+ {:role "reason" :content {:text "r2"}}
+ {:role "assistant" :content "A2"}]
+ (#'llm-providers.openai-chat/prune-history
+ [{:role "user" :content "Q1"}
+ {:role "reason" :content {:text "r1"}}
+ {:role "assistant" :content "A1"}
+ {:role "user" :content "Q2"}
+ {:role "reason" :content {:text "r2"}}
+ {:role "assistant" :content "A2"}]
+ :all))))
+
+ (testing "reasoningHistory \"off\" removes all reasoning messages"
+ (is (match?
+ [{:role "user" :content "Q1"}
+ {:role "assistant" :content "A1"}
+ {:role "user" :content "Q2"}
+ {:role "assistant" :content "A2"}]
+ (#'llm-providers.openai-chat/prune-history
+ [{:role "user" :content "Q1"}
+ {:role "reason" :content {:text "r1" :delta-reasoning? true}}
+ {:role "assistant" :content "A1"}
+ {:role "user" :content "Q2"}
+ {:role "reason" :content {:text "r2"}}
+ {:role "assistant" :content "A2"}]
+ :off))))
+
+ (testing "No user message - reasoningHistory \"turn\" leaves list unchanged"
(let [msgs [{:role "assistant" :content "A"}
{:role "reason" :content {:text "r"}}]]
- (is (= msgs (#'llm-providers.openai-chat/prune-history msgs))))))
+ (is (= msgs (#'llm-providers.openai-chat/prune-history msgs :turn)))))
+
+ (testing "No user message - reasoningHistory \"off\" removes reason"
+ (is (match?
+ [{:role "assistant" :content "A"}]
+ (#'llm-providers.openai-chat/prune-history
+ [{:role "assistant" :content "A"}
+ {:role "reason" :content {:text "r"}}]
+ :off)))))
(deftest valid-message-test
(testing "Tool messages are always kept"