Changes from 13 commits
13 changes: 13 additions & 0 deletions src/config/index.mjs
@@ -65,6 +65,10 @@ export const chatgptApiModelKeys = [
'chatgptApi4_1',
'chatgptApi4_1_mini',
'chatgptApi4_1_nano',
'chatgptApiO4Mini',
'chatgptApiGpt5',
'chatgptApiGpt5Mini',
'chatgptApiGpt5Nano',
]
export const customApiModelKeys = ['customModel']
export const ollamaApiModelKeys = ['ollamaModel']
@@ -256,6 +260,11 @@ export const Models = {
chatgptApi4_1_mini: { value: 'gpt-4.1-mini', desc: 'ChatGPT (GPT-4.1 mini)' },
chatgptApi4_1_nano: { value: 'gpt-4.1-nano', desc: 'ChatGPT (GPT-4.1 nano)' },

chatgptApiO4Mini: { value: 'o4-mini', desc: 'ChatGPT (o4-mini)' },
chatgptApiGpt5: { value: 'gpt-5', desc: 'ChatGPT (gpt-5)' },
chatgptApiGpt5Mini: { value: 'gpt-5-mini', desc: 'ChatGPT (gpt-5-mini)' },
chatgptApiGpt5Nano: { value: 'gpt-5-nano', desc: 'ChatGPT (gpt-5-nano)' },

claude2WebFree: { value: '', desc: 'Claude.ai (Web)' },
claude12Api: { value: 'claude-instant-1.2', desc: 'Claude.ai (API, Claude Instant 1.2)' },
claude2Api: { value: 'claude-2.0', desc: 'Claude.ai (API, Claude 2)' },
@@ -541,6 +550,10 @@ export const defaultConfig = {
'openRouter_anthropic_claude_sonnet4',
'openRouter_google_gemini_2_5_pro',
'openRouter_openai_o3',
'chatgptApiO4Mini',
'chatgptApiGpt5',
'chatgptApiGpt5Mini',
'chatgptApiGpt5Nano',
],
customApiModes: [
{
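Reviewer note: adding a model to this file takes three coordinated edits, as the hunks above show: a key in chatgptApiModelKeys (routing), an entry in Models (wire-level model name plus UI description), and a listing in defaultConfig's active API modes. A minimal sketch of how the pieces resolve at runtime; resolveModel is a hypothetical helper for illustration, not part of this PR:

// Path is relative to the repo root; adjust for the importing module.
import { Models, chatgptApiModelKeys } from './src/config/index.mjs'

// Hypothetical helper: maps a config key like 'chatgptApiGpt5Mini' to the
// wire-level model name and its display label.
function resolveModel(modelKey) {
  if (!chatgptApiModelKeys.includes(modelKey)) {
    throw new Error(`${modelKey} is not an OpenAI chat API model key`)
  }
  const { value, desc } = Models[modelKey]
  return { apiModelName: value, label: desc } // e.g. 'gpt-5-mini', 'ChatGPT (gpt-5-mini)'
}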
166 changes: 138 additions & 28 deletions src/services/apis/openai-api.mjs
@@ -5,7 +5,38 @@ import { fetchSSE } from '../../utils/fetch-sse.mjs'
import { getConversationPairs } from '../../utils/get-conversation-pairs.mjs'
import { isEmpty } from 'lodash-es'
import { getCompletionPromptBase, pushRecord, setAbortController } from './shared.mjs'
import { getModelValue } from '../../utils/model-name-convert.mjs'
import { getModelValue, isUsingReasoningModel } from '../../utils/model-name-convert.mjs'

/**
* Extract content from structured response arrays for reasoning models
* @param {Array} contentArray - Array of content segments
* @returns {string} - Extracted text content
*/
function extractContentFromArray(contentArray) {
if (!Array.isArray(contentArray)) {
console.debug('Content is not an array, returning empty string')
return ''
}

try {
const parts = contentArray
.map((part) => {
if (typeof part === 'string') return part
if (part && typeof part === 'object') {
// Prefer output_text segments; fallback to text property
if (typeof part.output_text === 'string') return part.output_text
if (typeof part.text === 'string') return part.text
}
return ''
})
.filter(Boolean)

return parts.join('')
} catch (error) {
console.debug('Error extracting content from array:', error)
return ''
}
}
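
Reviewer note: illustrative calls showing the intended behavior of extractContentFromArray (these examples are not part of the diff):

extractContentFromArray([
  'Answer: ',              // plain string segment, kept as-is
  { output_text: '42' },   // preferred: output_text segment
  { text: '.' },           // fallback: text property
  { irrelevant: true },    // yields '' and is filtered out
]) // => 'Answer: 42.'

extractContentFromArray('not an array') // => '' (logs a debug message)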

/**
* @param {Browser.Runtime.Port} port
@@ -65,10 +96,16 @@ export async function generateAnswersWithGptCompletionApi(port, question, sessio
return
}

answer += data.choices[0].text
const choice = data.choices?.[0]
if (!choice) {
console.debug('No choice in response data')
return
}

answer += choice.text
port.postMessage({ answer: answer, done: false, session: null })

if (data.choices[0]?.finish_reason) {
if (choice.finish_reason) {
finish()
return
}
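
Reviewer note: for context, the handler above consumes OpenAI text-completion SSE events. A representative (abridged) `data:` payload, assuming the standard completions response shape:

const sampleCompletionEvent = {
  id: 'cmpl-abc123', // illustrative id
  object: 'text_completion',
  choices: [{ index: 0, text: ' world', finish_reason: null }],
}
// choice.text chunks are appended to `answer`; a non-null finish_reason
// (e.g. 'stop') triggers finish().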
@@ -116,37 +153,74 @@ export async function generateAnswersWithChatgptApiCompat(
) {
const { controller, messageListener, disconnectListener } = setAbortController(port)
const model = getModelValue(session)
const isReasoningModel = isUsingReasoningModel(session)

const config = await getUserConfig()
const prompt = getConversationPairs(
session.conversationRecords.slice(-config.maxConversationContextLength),
false,
)
prompt.push({ role: 'user', content: question })

// Filter messages based on model type
const filteredPrompt = isReasoningModel
? prompt.filter((msg) => msg.role === 'user' || msg.role === 'assistant')
: prompt

filteredPrompt.push({ role: 'user', content: question })

let answer = ''
let finished = false
const finish = () => {
if (finished) return
finished = true
Copilot AI (Aug 30, 2025) commented on lines 176 to 179:

The finish function checks and sets the 'finished' flag without synchronization, which could lead to race conditions in concurrent scenarios. Consider using atomic operations or proper synchronization to ensure thread safety.

Suggested change:
-let finished = false
-const finish = () => {
-  if (finished) return
-  finished = true
+// Use an atomic flag for thread safety
+const finishedBuffer = new SharedArrayBuffer(4);
+const finishedFlag = new Int32Array(finishedBuffer);
+const finish = () => {
+  // Atomically check and set the finished flag
+  if (Atomics.compareExchange(finishedFlag, 0, 0, 1) !== 0) return;

pushRecord(session, question, answer)
console.debug('conversation history', { content: session.conversationRecords })
port.postMessage({ answer: null, done: true, session: session })
port.postMessage({ answer: null, done: true, session })
}

// Build request body with reasoning model-specific parameters
const requestBody = {
messages: filteredPrompt,
model,
...extraBody,
}

if (isReasoningModel) {
// Reasoning models use max_completion_tokens instead of max_tokens
requestBody.max_completion_tokens = config.maxResponseTokenLength
// Reasoning models don't support streaming during beta
requestBody.stream = false
// Reasoning models have fixed parameters during beta
requestBody.temperature = 1
requestBody.top_p = 1
requestBody.n = 1
requestBody.presence_penalty = 0
requestBody.frequency_penalty = 0
// Disallow tools/functions/function calling in reasoning mode
delete requestBody.tools
delete requestBody.tool_choice
delete requestBody.functions
delete requestBody.function_call
} else {
// Non-reasoning models use the existing behavior
requestBody.stream = true
requestBody.max_tokens = config.maxResponseTokenLength
requestBody.temperature = config.temperature
}
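
Reviewer note: to make the branch above concrete, the two shapes of requestBody come out roughly as follows (values illustrative, assuming config.maxResponseTokenLength === 1000 and config.temperature === 0.7):

// Reasoning model (e.g. model === 'o4-mini'):
const reasoningBody = {
  messages: [{ role: 'user', content: 'Hi' }], // user/assistant turns only
  model: 'o4-mini',
  max_completion_tokens: 1000,
  stream: false, // no streaming during the reasoning-model beta
  temperature: 1, top_p: 1, n: 1, presence_penalty: 0, frequency_penalty: 0,
}

// Non-reasoning model (e.g. model === 'gpt-4.1-mini'):
const streamingBody = {
  messages: [{ role: 'user', content: 'Hi' }],
  model: 'gpt-4.1-mini',
  stream: true,
  max_tokens: 1000,
  temperature: 0.7,
}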

// Validate API key
if (!apiKey || typeof apiKey !== 'string' || !apiKey.trim()) {
throw new Error('Invalid API key provided')
Copilot AI (Aug 30, 2025) commented:

The API key validation throws a generic Error which may not provide clear guidance to users about the specific issue. Consider throwing a more specific error type or providing a more descriptive error message that helps users understand what constitutes a valid API key.

Suggested change:
-if (!apiKey || typeof apiKey !== 'string' || !apiKey.trim()) {
-  throw new Error('Invalid API key provided')
+if (
+  !apiKey ||
+  typeof apiKey !== 'string' ||
+  !apiKey.trim() ||
+  !/^sk-[A-Za-z0-9]{20,}$/.test(apiKey.trim())
+) {
+  // OpenAI API keys typically start with "sk-" and are at least 24 chars long
+  class InvalidApiKeyError extends Error {
+    constructor(message) {
+      super(message);
+      this.name = 'InvalidApiKeyError';
+    }
+  }
+  throw new InvalidApiKeyError(
+    'Invalid API key provided. API key must be a non-empty string starting with "sk-" and at least 24 characters long.'
+  );
}

await fetchSSE(`${baseUrl}/chat/completions`, {
method: 'POST',
signal: controller.signal,
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${apiKey}`,
Authorization: `Bearer ${apiKey.trim()}`,
},
body: JSON.stringify({
messages: prompt,
model,
stream: true,
max_tokens: config.maxResponseTokenLength,
temperature: config.temperature,
...extraBody,
}),
body: JSON.stringify(requestBody),
onMessage(message) {
console.debug('sse message', message)
if (finished) return
@@ -162,21 +236,57 @@
return
}

const delta = data.choices[0]?.delta?.content
const content = data.choices[0]?.message?.content
const text = data.choices[0]?.text
if (delta !== undefined) {
answer += delta
} else if (content) {
answer = content
} else if (text) {
answer += text
}
port.postMessage({ answer: answer, done: false, session: null })
if (isReasoningModel) {
// For reasoning models (non-streaming), get the complete response
const choice = data.choices?.[0]
if (!choice) {
console.debug('No choice in response data for reasoning model')
return
}

if (data.choices[0]?.finish_reason) {
finish()
return
let content = choice.message?.content ?? choice.text

// Handle structured response arrays for reasoning models
if (Array.isArray(content)) {
content = extractContentFromArray(content)
}

// Ensure content is a string and not empty
if (content && typeof content === 'string') {
const trimmedContent = content.trim()
if (trimmedContent) {
answer = trimmedContent
port.postMessage({ answer, done: false, session: null })
}
}

// Only finish when we have a proper finish reason
if (choice.finish_reason) {
finish()
}
} else {
// For non-reasoning models (streaming), handle delta content
const choice = data.choices?.[0]
if (!choice) {
console.debug('No choice in response data')
return
}
const delta = choice.delta?.content
const content = choice.message?.content
const text = choice.text
if (delta !== undefined) {
answer += delta
} else if (content) {
answer = content
} else if (text) {
answer += text
}
port.postMessage({ answer, done: false, session: null })

if (choice.finish_reason) {
finish()
return
}
}
},
async onStart() {},
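Reviewer note: the reasoning-model branch of onMessage above expects a complete, non-streamed chat completion, roughly of this shape (representative, abridged):

const sampleReasoningResponse = {
  object: 'chat.completion',
  choices: [
    {
      index: 0,
      message: { role: 'assistant', content: 'Final answer text' },
      finish_reason: 'stop',
    },
  ],
}
// When message.content arrives as an array of segments instead of a string,
// extractContentFromArray() flattens it before the answer is posted.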
21 changes: 21 additions & 0 deletions src/utils/model-name-convert.mjs
@@ -164,3 +164,24 @@ export function isInApiModeGroup(apiModeGroup, configOrSession) {
const [, { value: groupValue }] = foundGroup
return groupValue === apiModeGroup
}

export function isUsingReasoningModel(configOrSession) {
const modelValue = getModelValue(configOrSession)
if (!modelValue) return false

// Match o1, o3, or o4 models with optional standard OpenAI suffixes
// Allows: o1, o1-preview, o1-mini, o3, o3-mini, o4, o4-mini, etc.
// Prevents: o10, o30, o40, o1x, o3x, o4x, and other invalid patterns
if (/^o[134](?:$|-(?:preview|mini|turbo|instruct|nano|small|medium|large))$/.test(modelValue)) {
Copilot AI (Aug 30, 2025) commented:

The regex pattern includes suffixes like 'turbo', 'instruct', 'nano', 'small', 'medium', 'large' that may not be valid for reasoning models. Consider using a more restrictive pattern that only includes confirmed OpenAI reasoning model suffixes like 'preview' and 'mini' to prevent false positives.

Suggested change:
-if (/^o[134](?:$|-(?:preview|mini|turbo|instruct|nano|small|medium|large))$/.test(modelValue)) {
+if (/^o[134](?:$|-(?:preview|mini))$/.test(modelValue)) {

return true
}

// Match gpt-5* pattern but exclude gpt-5-chat-* variants
// Allows: gpt-5, gpt-5-mini, gpt-5-nano, gpt-5-preview, gpt-5-turbo
// Prevents: gpt-5-chat-latest, gpt-5-chat, etc.
if (modelValue.startsWith('gpt-5') && !modelValue.startsWith('gpt-5-chat')) {
return true
}

return false
}
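
Reviewer note: behavior of the checks above on raw model strings (illustrative; the real function resolves modelValue from a session or config via getModelValue):

// isUsingReasoningModel resolution by model value:
// 'o1'                -> true   (bare o-series model)
// 'o1-preview'        -> true
// 'o4-mini'           -> true
// 'o10'               -> false  (anchors reject o10/o30/o40)
// 'o3x'               -> false
// 'gpt-5'             -> true
// 'gpt-5-nano'        -> true
// 'gpt-5-chat-latest' -> false  (gpt-5-chat-* is excluded)
// 'gpt-4.1-mini'      -> false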