Commit a2c1069

fix: respect 1M token context window for auto-condensing
- Update Task.ts to use actual context window from api.getModel().info
- This ensures the 1M token extension is properly respected when enabled
- Fixes auto-condensing triggering at base model limits instead of extended limits

Fixes #9831
1 parent 29385e0 commit a2c1069
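
The bug comes down to which context window the threshold check sees: auto-condensing compares the conversation's token count against the model's context window, so a stale base limit (for example 200k) makes condensing fire long before the extended 1M window is actually full. Below is a minimal TypeScript sketch of that comparison; ModelInfoLike, shouldAutoCondense, and the 75% trigger are illustrative assumptions, not the actual Task.ts logic.

// Illustration only: why the condense check must read the live context window.
// The interface, function name, and threshold below are hypothetical.
interface ModelInfoLike {
	contextWindow: number // e.g. 200_000 base, or 1_000_000 with the extension enabled
}

const AUTO_CONDENSE_PERCENT = 75 // hypothetical trigger point

function shouldAutoCondense(contextTokens: number, info: ModelInfoLike): boolean {
	const usedPercent = (100 * contextTokens) / info.contextWindow
	return usedPercent >= AUTO_CONDENSE_PERCENT
}

// 180k tokens is 90% of a 200k window (condense) but only 18% of a 1M window (leave it alone).
console.log(shouldAutoCondense(180_000, { contextWindow: 200_000 })) // true
console.log(shouldAutoCondense(180_000, { contextWindow: 1_000_000 })) // false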

1 file changed: +8 −4 lines changed

src/core/task/Task.ts

Lines changed: 8 additions & 4 deletions
@@ -3338,10 +3338,12 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 		const { profileThresholds = {} } = state ?? {}
 
 		const { contextTokens } = this.getTokenUsage()
-		const modelInfo = this.api.getModel().info
+		// Get the actual model info from the API handler which includes 1M context updates
+		const actualModel = this.api.getModel()
+		const modelInfo = actualModel.info
 
 		const maxTokens = getModelMaxOutputTokens({
-			modelId: this.api.getModel().id,
+			modelId: actualModel.id,
 			model: modelInfo,
 			settings: this.apiConfiguration,
 		})
@@ -3483,10 +3485,12 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 		const { contextTokens } = this.getTokenUsage()
 
 		if (contextTokens) {
-			const modelInfo = this.api.getModel().info
+			// Get the actual model info from the API handler which includes 1M context updates
+			const actualModel = this.api.getModel()
+			const modelInfo = actualModel.info
 
 			const maxTokens = getModelMaxOutputTokens({
-				modelId: this.api.getModel().id,
+				modelId: actualModel.id,
 				model: modelInfo,
 				settings: this.apiConfiguration,
 			})
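
Both hunks also reuse one actualModel snapshot instead of calling this.api.getModel() twice, so the modelId passed to getModelMaxOutputTokens and the modelInfo used for the window math always describe the same model. As a rough illustration of how those two values combine downstream, here is a hedged sketch; allowedContextTokens and the example model values are assumptions, not code from this commit.

// Illustration only: hypothetical helper showing how an output-token reservation
// is carved out of the (possibly 1M) context window before the condense check.
interface ModelSnapshot {
	id: string
	info: { contextWindow: number; maxTokens?: number }
}

function allowedContextTokens(model: ModelSnapshot, maxOutputTokens: number): number {
	// Reserve room for the model's response so it cannot overflow the window.
	return model.info.contextWindow - maxOutputTokens
}

// Example values only; the extended window should yield a far larger budget.
const extended: ModelSnapshot = { id: "example-1m-model", info: { contextWindow: 1_000_000, maxTokens: 8192 } }
console.log(allowedContextTokens(extended, extended.info.maxTokens ?? 0)) // 991808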
