diff --git a/src/api/index.ts b/src/api/index.ts
index 93284ace184..b5777439843 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -19,7 +19,7 @@
 import { VsCodeLmHandler } from "./providers/vscode-lm"
 import { ApiStream } from "./transform/stream"
 import { UnboundHandler } from "./providers/unbound"
 import { RequestyHandler } from "./providers/requesty"
-import { PearAiHandler } from "./providers/pearai"
+import { PearAiHandler } from "./providers/pearai/pearai"
 import { HumanRelayHandler } from "./providers/human-relay"
 import { FakeAIHandler } from "./providers/fake-ai"
diff --git a/src/api/providers/pearai.ts b/src/api/providers/pearai/pearai.ts
similarity index 70%
rename from src/api/providers/pearai.ts
rename to src/api/providers/pearai/pearai.ts
index 6c2898d165a..1fdba1b52d9 100644
--- a/src/api/providers/pearai.ts
+++ b/src/api/providers/pearai/pearai.ts
@@ -1,13 +1,15 @@
 import * as vscode from "vscode"
-import { ApiHandlerOptions, PEARAI_URL, ModelInfo } from "../../shared/api"
-import { AnthropicHandler } from "./anthropic"
-import { DeepSeekHandler } from "./deepseek"
+import { ApiHandlerOptions, ModelInfo } from "../../../shared/api"
+import { AnthropicHandler } from "../anthropic"
+import { DeepSeekHandler } from "../deepseek"
 import Anthropic from "@anthropic-ai/sdk"
-import { BaseProvider } from "./base-provider"
-import { SingleCompletionHandler } from "../"
-import { OpenRouterHandler } from "./openrouter"
-import { GeminiHandler } from "./gemini"
-import { OpenAiHandler } from "./openai"
+import { BaseProvider } from "../base-provider"
+import { SingleCompletionHandler } from "../.."
+import { OpenRouterHandler } from "../openrouter"
+import { GeminiHandler } from "../gemini"
+import { OpenAiHandler } from "../openai"
+import { PearAIGenericHandler } from "./pearaiGeneric"
+import { PEARAI_URL } from "../../../shared/pearaiApi"
 
 interface PearAiModelsResponse {
 	models: {
@@ -20,7 +22,9 @@
 	}
 }
 
 export class PearAiHandler extends BaseProvider implements SingleCompletionHandler {
-	private handler!: AnthropicHandler | OpenAiHandler
+	private handler!: AnthropicHandler | PearAIGenericHandler
+	private pearAiModelsResponse: PearAiModelsResponse | null = null
+	private options: ApiHandlerOptions
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -40,8 +44,9 @@ export class PearAiHandler extends BaseProvider implements SingleCompletionHandl
 		} else {
 			vscode.commands.executeCommand("pearai.checkPearAITokens", undefined)
 		}
+		this.options = options
 
-		this.handler = new OpenAiHandler({
+		this.handler = new PearAIGenericHandler({
 			...options,
 			openAiBaseUrl: PEARAI_URL,
 			openAiApiKey: options.pearaiApiKey,
@@ -64,8 +69,9 @@ export class PearAiHandler extends BaseProvider implements SingleCompletionHandl
 					throw new Error(`Failed to fetch models: ${response.statusText}`)
 				}
 				const data = (await response.json()) as PearAiModelsResponse
+				this.pearAiModelsResponse = data
 				const underlyingModel = data.models[modelId]?.underlyingModelUpdated || "claude-3-5-sonnet-20241022"
-				if (underlyingModel.startsWith("claude")) {
+				if (underlyingModel.startsWith("claude") || modelId.startsWith("anthropic/")) {
 					// Default to Claude
 					this.handler = new AnthropicHandler({
 						...options,
 						apiKey: options.pearaiApiKey,
 						anthropicBaseUrl: PEARAI_URL,
@@ -74,7 +80,7 @@ export class PearAiHandler extends BaseProvider implements SingleCompletionHandl
 						apiModelId: underlyingModel,
 					})
 				} else {
-					this.handler = new OpenAiHandler({
+					this.handler = new PearAIGenericHandler({
 						...options,
 						openAiBaseUrl: PEARAI_URL,
 						openAiApiKey: options.pearaiApiKey,
@@ -91,14 +97,14 @@ export class PearAiHandler extends BaseProvider implements SingleCompletionHandl
 					apiModelId: "claude-3-5-sonnet-20241022",
 				})
 			}
-		} else if (modelId.startsWith("claude")) {
+		} else if (modelId.startsWith("claude") || modelId.startsWith("anthropic/")) {
 			this.handler = new AnthropicHandler({
 				...options,
 				apiKey: options.pearaiApiKey,
 				anthropicBaseUrl: PEARAI_URL,
 			})
 		} else {
-			this.handler = new OpenAiHandler({
+			this.handler = new PearAIGenericHandler({
 				...options,
 				openAiBaseUrl: PEARAI_URL,
 				openAiApiKey: options.pearaiApiKey,
@@ -108,22 +114,25 @@ export class PearAiHandler extends BaseProvider implements SingleCompletionHandl
 	}
 
 	getModel(): { id: string; info: ModelInfo } {
-		console.dir(this.handler)
-		const baseModel = this.handler.getModel()
-		return {
-			id: baseModel.id,
-			info: {
-				...baseModel.info,
-				// Inherit all capabilities from the underlying model
-				supportsImages: baseModel.info.supportsImages,
-				supportsComputerUse: baseModel.info.supportsComputerUse,
-				supportsPromptCache: baseModel.info.supportsPromptCache,
-				inputPrice: baseModel.info.inputPrice || 0,
-				outputPrice: baseModel.info.outputPrice || 0,
-				cacheWritesPrice: baseModel.info.cacheWritesPrice ? baseModel.info.cacheWritesPrice : undefined,
-				cacheReadsPrice: baseModel.info.cacheReadsPrice ? baseModel.info.cacheReadsPrice : undefined,
-			},
+		if (
+			this.pearAiModelsResponse &&
+			this.options.apiModelId === "pearai-model" &&
+			this.pearAiModelsResponse.models
+		) {
+			const modelInfo = this.pearAiModelsResponse.models[this.options.apiModelId]
+			if (modelInfo) {
+				return {
+					id: this.options.apiModelId,
+					info: {
+						contextWindow: modelInfo.contextWindow || 4096, // provide default or actual value
+						supportsPromptCache: modelInfo.supportsPromptCaching || false, // provide default or actual value
+						...modelInfo,
+					},
+				}
+			}
 		}
+		const baseModel = this.handler.getModel()
+		return baseModel
 	}
 
 	async *createMessage(systemPrompt: string, messages: any[]): AsyncGenerator {
diff --git a/src/api/providers/pearai/pearaiGeneric.ts b/src/api/providers/pearai/pearaiGeneric.ts
new file mode 100644
index 00000000000..03cbd61a883
--- /dev/null
+++ b/src/api/providers/pearai/pearaiGeneric.ts
@@ -0,0 +1,335 @@
+/*
+pearaiGeneric.ts is the same as openai.ts, with changes to support the PearAI API. It is currently used for all hosted non-Anthropic models.
+*/
+
+import { Anthropic } from "@anthropic-ai/sdk"
+import OpenAI, { AzureOpenAI } from "openai"
+import axios from "axios"
+
+import {
+	ApiHandlerOptions,
+	azureOpenAiDefaultApiVersion,
+	ModelInfo,
+	openAiModelInfoSaneDefaults,
+} from "../../../shared/api"
+import { SingleCompletionHandler } from "../../index"
+import { convertToOpenAiMessages } from "../../transform/openai-format"
+import { convertToR1Format } from "../../transform/r1-format"
+import { convertToSimpleMessages } from "../../transform/simple-format"
+import { ApiStream, ApiStreamUsageChunk } from "../../transform/stream"
+import { BaseProvider } from "../base-provider"
+import { XmlMatcher } from "../../../utils/xml-matcher"
+import { allModels, pearAiDefaultModelId, pearAiDefaultModelInfo } from "../../../shared/pearaiApi"
+import { calculateApiCostOpenAI } from "../../../utils/cost"
+
+const DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6
+
+export const defaultHeaders = {
+	"HTTP-Referer": "https://trypear.ai",
+	"X-Title": "PearAI",
+}
+
+export interface OpenAiHandlerOptions extends ApiHandlerOptions {}
+
+export class PearAIGenericHandler extends BaseProvider implements SingleCompletionHandler {
+	protected options: OpenAiHandlerOptions
+	private client: OpenAI
+
+	constructor(options: OpenAiHandlerOptions) {
+		super()
+		this.options = options
+
+		const baseURL = this.options.openAiBaseUrl ?? "https://api.openai.com/v1"
+		const apiKey = this.options.openAiApiKey ?? "not-provided"
+		let urlHost: string
+
+		try {
+			urlHost = new URL(this.options.openAiBaseUrl ?? "").host
+		} catch (error) {
+			// Likely an invalid `openAiBaseUrl`; we're still working on
+			// proper settings validation.
+			urlHost = ""
+		}
+
+		if (urlHost === "azure.com" || urlHost.endsWith(".azure.com") || options.openAiUseAzure) {
+			// Azure API shape slightly differs from the core API shape:
+			// https://github.com/openai/openai-node?tab=readme-ov-file#microsoft-azure-openai
+			this.client = new AzureOpenAI({
+				baseURL,
+				apiKey,
+				apiVersion: this.options.azureApiVersion || azureOpenAiDefaultApiVersion,
+				defaultHeaders,
+			})
+		} else {
+			this.client = new OpenAI({ baseURL, apiKey, defaultHeaders })
+		}
+	}
+
+	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+		const modelInfo = this.getModel().info
+		const modelUrl = this.options.openAiBaseUrl ?? ""
+		const modelId = this.options.openAiModelId ?? ""
+
+		const deepseekReasoner = modelId.includes("deepseek-reasoner")
+		const ark = modelUrl.includes(".volces.com")
+
+		if (modelId.startsWith("o3-mini")) {
+			yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages)
+			return
+		}
+
+		if (this.options.openAiStreamingEnabled ?? true) {
+			let systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
+				role: "system",
+				content: systemPrompt,
+			}
+
+			let convertedMessages
+			if (deepseekReasoner) {
+				convertedMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
+			} else if (ark) {
+				convertedMessages = [systemMessage, ...convertToSimpleMessages(messages)]
+			} else {
+				if (modelInfo.supportsPromptCache) {
+					systemMessage = {
+						role: "system",
+						content: [
+							{
+								type: "text",
+								text: systemPrompt,
+								// @ts-ignore-next-line
+								cache_control: { type: "ephemeral" },
+							},
+						],
+					}
+				}
+				convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)]
+				if (modelInfo.supportsPromptCache) {
+					// Note: the following logic is copied from openrouter:
+					// Add cache_control to the last two user messages
+					// (note: this works because we only ever add one user message at a time, but if we added multiple we'd need to mark the user message before the last assistant message)
+					const lastTwoUserMessages = convertedMessages.filter((msg) => msg.role === "user").slice(-2)
+					lastTwoUserMessages.forEach((msg) => {
+						if (typeof msg.content === "string") {
+							msg.content = [{ type: "text", text: msg.content }]
+						}
+						if (Array.isArray(msg.content)) {
+							// NOTE: this is fine since env details will always be added at the end. But if it weren't there, and the user added an image_url type message, it would pop a text part before it and then move it after to the end.
+							let lastTextPart = msg.content.filter((part) => part.type === "text").pop()
+
+							if (!lastTextPart) {
+								lastTextPart = { type: "text", text: "..." }
+								msg.content.push(lastTextPart)
+							}
+							// @ts-ignore-next-line
+							lastTextPart["cache_control"] = { type: "ephemeral" }
+						}
+					})
+				}
+			}
+
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
+				model: modelId,
+				temperature: this.options.modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
+				messages: convertedMessages,
+				stream: true as const,
+				stream_options: { include_usage: true },
+			}
+			if (this.options.includeMaxTokens) {
+				requestOptions.max_tokens = modelInfo.maxTokens
+			}
+
+			const stream = await this.client.chat.completions.create(requestOptions)
+
+			const matcher = new XmlMatcher(
+				"think",
+				(chunk) =>
+					({
+						type: chunk.matched ? "reasoning" : "text",
+						text: chunk.data,
+					}) as const,
+			)
+
+			let lastUsage
+
+			for await (const chunk of stream) {
+				const delta = chunk.choices[0]?.delta ?? {}
+
+				if (delta.content) {
+					for (const chunk of matcher.update(delta.content)) {
+						yield chunk
+					}
+				}
+
+				if ("reasoning_content" in delta && delta.reasoning_content) {
+					yield {
+						type: "reasoning",
+						text: (delta.reasoning_content as string | undefined) || "",
+					}
+				}
+				if (chunk.usage) {
+					lastUsage = chunk.usage
+				}
+			}
+			for (const chunk of matcher.final()) {
+				yield chunk
+			}
+
+			if (lastUsage) {
+				yield this.processUsageMetrics(lastUsage, modelInfo)
+			}
+		} else {
+			// o1, for instance, doesn't support streaming, non-1 temperature, or a system prompt
+			const systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam = {
+				role: "user",
+				content: systemPrompt,
+			}
+
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+				model: modelId,
+				messages: deepseekReasoner
+					? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
+					: [systemMessage, ...convertToOpenAiMessages(messages)],
+			}
+
+			const response = await this.client.chat.completions.create(requestOptions)
+
+			yield {
+				type: "text",
+				text: response.choices[0]?.message.content || "",
+			}
+			yield this.processUsageMetrics(response.usage, modelInfo)
+		}
+	}
+
+	protected processUsageMetrics(usage: any, modelInfo?: ModelInfo): ApiStreamUsageChunk {
+		const inputTokens = usage?.prompt_tokens || 0
+		const outputTokens = usage?.completion_tokens || 0
+		const cacheWriteTokens = usage?.prompt_tokens_details?.caching_tokens || 0
+		const cacheReadTokens = usage?.prompt_tokens_details?.cached_tokens || 0
+		const totalCost = modelInfo
+			? calculateApiCostOpenAI(modelInfo, inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens)
+			: 0
+		return {
+			type: "usage",
+			inputTokens: inputTokens,
+			outputTokens: outputTokens,
+			cacheWriteTokens: cacheWriteTokens,
+			cacheReadTokens: cacheReadTokens,
+			totalCost: totalCost,
+		}
+	}
+
+	override getModel(): { id: string; info: ModelInfo } {
+		const modelId = this.options.openAiModelId ?? "none"
+		return {
+			id: modelId,
+			info: allModels[modelId],
+		}
+	}
+
+	async completePrompt(prompt: string): Promise<string> {
+		try {
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+				model: this.getModel().id,
+				messages: [{ role: "user", content: prompt }],
+			}
+
+			const response = await this.client.chat.completions.create(requestOptions)
+			return response.choices[0]?.message.content || ""
+		} catch (error) {
+			if (error instanceof Error) {
+				throw new Error(`OpenAI completion error: ${error.message}`)
+			}
+			throw error
+		}
+	}
+
+	private async *handleO3FamilyMessage(
+		modelId: string,
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+	): ApiStream {
+		if (this.options.openAiStreamingEnabled ?? true) {
+			const stream = await this.client.chat.completions.create({
+				model: "o3-mini",
+				messages: [
+					{
+						role: "developer",
+						content: `Formatting re-enabled\n${systemPrompt}`,
+					},
+					...convertToOpenAiMessages(messages),
+				],
+				stream: true,
+				stream_options: { include_usage: true },
+				reasoning_effort: this.getModel().info.reasoningEffort,
+			})
+
+			yield* this.handleStreamResponse(stream)
+		} else {
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+				model: modelId,
+				messages: [
+					{
+						role: "developer",
+						content: `Formatting re-enabled\n${systemPrompt}`,
+					},
+					...convertToOpenAiMessages(messages),
+				],
+			}
+
+			const response = await this.client.chat.completions.create(requestOptions)
+
+			yield {
+				type: "text",
+				text: response.choices[0]?.message.content || "",
+			}
+			yield this.processUsageMetrics(response.usage)
+		}
+	}
+
+	private async *handleStreamResponse(stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>): ApiStream {
+		for await (const chunk of stream) {
+			const delta = chunk.choices[0]?.delta
+			if (delta?.content) {
+				yield {
+					type: "text",
+					text: delta.content,
+				}
+			}
+
+			if (chunk.usage) {
+				yield {
+					type: "usage",
+					inputTokens: chunk.usage.prompt_tokens || 0,
+					outputTokens: chunk.usage.completion_tokens || 0,
+					cacheReadTokens: chunk.usage.prompt_tokens_details?.cached_tokens,
+				}
+			}
+		}
+	}
+}
+
+export async function getOpenAiModels(baseUrl?: string, apiKey?: string) {
+	try {
+		if (!baseUrl) {
+			return []
+		}
+
+		if (!URL.canParse(baseUrl)) {
+			return []
+		}
+
+		const config: Record<string, any> = {}
+
+		if (apiKey) {
+			config["headers"] = { Authorization: `Bearer ${apiKey}` }
+		}
+
+		const response = await axios.get(`${baseUrl}/models`, config)
+		const modelsArray = response.data?.data?.map((model: any) => model.id) || []
+		return [...new Set(modelsArray)]
+	} catch (error) {
+		return []
+	}
+}
diff --git a/src/services/mcp/McpHub.ts b/src/services/mcp/McpHub.ts
index 2919d69ff6c..ad481a16d3d 100644
--- a/src/services/mcp/McpHub.ts
+++ b/src/services/mcp/McpHub.ts
@@ -30,7 +30,7 @@ import {
 } from "../../shared/mcp"
 import { fileExistsAtPath } from "../../utils/fs"
 import { arePathsEqual } from "../../utils/path"
-import { PEARAI_URL } from "../../shared/api"
+import { PEARAI_URL } from "../../shared/pearaiApi"
 
 export type McpConnection = {
 	server: McpServer
diff --git a/src/shared/api.ts b/src/shared/api.ts
index b1e57ce5960..f0c78a18a50 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -767,6 +767,14 @@ export const geminiModels = {
 		inputPrice: 0,
 		outputPrice: 0,
 	},
+	"gemini-2.0-flash": {
+		maxTokens: 8192,
+		contextWindow: 1_048_576,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0.15,
+		outputPrice: 0.6,
+	},
 	"gemini-2.0-flash-lite-preview-02-05": {
 		maxTokens: 8192,
 		contextWindow: 1_048_576,
@@ -1041,39 +1049,3 @@ export const unboundDefaultModelInfo: ModelInfo = {
 	cacheWritesPrice: 3.75,
 	cacheReadsPrice: 0.3,
 }
-// CHANGE AS NEEDED FOR TESTING
-// PROD:
-export const PEARAI_URL = "https://server.trypear.ai/pearai-server-api2/integrations/cline"
-// DEV:
-// export const PEARAI_URL = "http://localhost:8000/integrations/cline"
-
-// PearAI
-export type PearAiModelId = keyof typeof pearAiModels
-export const pearAiDefaultModelId: PearAiModelId = "pearai-model"
-export const pearAiDefaultModelInfo: ModelInfo = {
-	maxTokens: 8192,
-	contextWindow: 200_000,
-	supportsImages: true,
-	supportsPromptCache: true,
-	inputPrice: 3.0,
-	outputPrice: 15.0,
-	cacheWritesPrice: 3.75,
-	cacheReadsPrice: 0.3,
-	description:
-		"PearAI Model automatically routes you to the most best / most suitable model on the market. Recommended for most users.",
-}
-
-export const pearAiModels = {
-	"pearai-model": {
-		maxTokens: 8192,
-		contextWindow: 200_000,
-		supportsImages: true,
-		supportsPromptCache: true,
-		inputPrice: 3.0,
-		outputPrice: 15.0,
-		cacheWritesPrice: 3.75,
-		cacheReadsPrice: 0.3,
-		description:
-			"PearAI Model automatically routes you to the most best / most suitable model on the market. Recommended for most users.",
-	},
-} as const satisfies Record<string, ModelInfo>
diff --git a/src/shared/pearaiApi.ts b/src/shared/pearaiApi.ts
new file mode 100644
index 00000000000..f1409a7aaff
--- /dev/null
+++ b/src/shared/pearaiApi.ts
@@ -0,0 +1,132 @@
+// CHANGE AS NEEDED FOR DEVELOPMENT
+// PROD:
+export const PEARAI_URL = "https://server.trypear.ai/pearai-server-api2/integrations/cline"
+// DEV:
+// export const PEARAI_URL = "http://localhost:8000/integrations/cline"
+
+import {
+	anthropicModels,
+	bedrockModels,
+	deepSeekModels,
+	geminiModels,
+	glamaDefaultModelId,
+	glamaDefaultModelInfo,
+	mistralModels,
+	ModelInfo,
+	openAiNativeModels,
+	openRouterDefaultModelId,
+	openRouterDefaultModelInfo,
+	requestyDefaultModelId,
+	requestyDefaultModelInfo,
+	unboundDefaultModelId,
+	unboundDefaultModelInfo,
+	vertexModels,
+} from "./api"
+
+// PearAI
+export type PearAiModelId = keyof typeof pearAiModels
+export const pearAiDefaultModelId: PearAiModelId = "pearai-model"
+export const pearAiDefaultModelInfo: ModelInfo = {
+	maxTokens: 8192,
+	contextWindow: 200_000,
+	supportsImages: true,
+	supportsPromptCache: true,
+	inputPrice: 3.0,
+	outputPrice: 15.0,
+	cacheWritesPrice: 3.75,
+	cacheReadsPrice: 0.3,
+	description:
+		"PearAI Model automatically routes you to the best / most suitable model on the market. Recommended for most users.",
+}
+
+export const pearAiModels = {
+	"pearai-model": {
+		maxTokens: 8192,
+		contextWindow: 200_000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		inputPrice: 3.0,
+		outputPrice: 15.0,
+		cacheWritesPrice: 3.75,
+		cacheReadsPrice: 0.3,
+		description:
+			"PearAI Model automatically routes you to the best / most suitable model on the market. Recommended for most users.",
Recommended for most users.", + }, +} as const satisfies Record + +export const allModels: { [key: string]: ModelInfo } = { + // Anthropic models + ...Object.entries(anthropicModels).reduce( + (acc, [key, value]) => ({ + ...acc, + [`anthropic/${key}`]: value, + }), + {}, + ), + + // Bedrock models + ...Object.entries(bedrockModels).reduce( + (acc, [key, value]) => ({ + ...acc, + [`bedrock/${key}`]: value, + }), + {}, + ), + + // Glama models (single default model) + [`glama/${glamaDefaultModelId}`]: glamaDefaultModelInfo, + + // Requesty models (single default model) + [`requesty/${requestyDefaultModelId}`]: requestyDefaultModelInfo, + + // OpenRouter models (single default model) + [`openrouter/${openRouterDefaultModelId}`]: openRouterDefaultModelInfo, + + // Vertex models + ...Object.entries(vertexModels).reduce( + (acc, [key, value]) => ({ + ...acc, + [`vertex/${key}`]: value, + }), + {}, + ), + + // Gemini models + ...Object.entries(geminiModels).reduce( + (acc, [key, value]) => ({ + ...acc, + [`gemini/${key}`]: value, + }), + {}, + ), + + // OpenAI Native models + ...Object.entries(openAiNativeModels).reduce( + (acc, [key, value]) => ({ + ...acc, + [`openai-native/${key}`]: value, + }), + {}, + ), + + // DeepSeek models + ...Object.entries(deepSeekModels).reduce( + (acc, [key, value]) => ({ + ...acc, + [`deepseek/${key}`]: value, + }), + {}, + ), + + // Mistral models + ...Object.entries(mistralModels).reduce( + (acc, [key, value]) => ({ + ...acc, + [`mistral/${key}`]: value, + }), + {}, + ), + + // Unbound models (single default model) + [`unbound/${unboundDefaultModelId}`]: unboundDefaultModelInfo, +} as const satisfies Record diff --git a/webview-ui/src/components/chat/ChatView.tsx b/webview-ui/src/components/chat/ChatView.tsx index aa050221a72..f20ac5c4cff 100644 --- a/webview-ui/src/components/chat/ChatView.tsx +++ b/webview-ui/src/components/chat/ChatView.tsx @@ -14,7 +14,6 @@ import { import { McpServer, McpTool } from "../../../../src/shared/mcp" import { findLast } from "../../../../src/shared/array" import { combineApiRequests } from "../../../../src/shared/combineApiRequests" -import { ModelInfo, pearAiDefaultModelId, pearAiDefaultModelInfo, PEARAI_URL } from "../../../../src/shared/api" import { combineCommandSequences } from "../../../../src/shared/combineCommandSequences" import { getApiMetrics } from "../../../../src/shared/getApiMetrics" import { useExtensionState } from "../../context/ExtensionStateContext" diff --git a/webview-ui/src/components/chat/TaskHeader.tsx b/webview-ui/src/components/chat/TaskHeader.tsx index 1a790bbec05..6423794b640 100644 --- a/webview-ui/src/components/chat/TaskHeader.tsx +++ b/webview-ui/src/components/chat/TaskHeader.tsx @@ -18,6 +18,7 @@ import Thumbnails from "../common/Thumbnails" import { normalizeApiConfiguration } from "../settings/ApiOptions" import { DeleteTaskDialog } from "../history/DeleteTaskDialog" import { vscBadgeBackground, vscEditorBackground, vscInputBackground } from "../ui" +import { usePearAiModels } from "@/hooks/usePearAiModels" interface TaskHeaderProps { task: ClineMessage @@ -44,7 +45,11 @@ const TaskHeader: React.FC = ({ }) => { const { t } = useTranslation() const { apiConfiguration, currentTaskItem } = useExtensionState() - const { selectedModelInfo } = useMemo(() => normalizeApiConfiguration(apiConfiguration), [apiConfiguration]) + const pearAiModels = usePearAiModels(apiConfiguration) + const { selectedModelInfo } = useMemo(() => { + return normalizeApiConfiguration(apiConfiguration, pearAiModels) + }, 
+
 	const [isTaskExpanded, setIsTaskExpanded] = useState(true)
 	const [isTextExpanded, setIsTextExpanded] = useState(false)
 	const [showSeeMore, setShowSeeMore] = useState(false)
diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx
index a7d9f71c03b..e534f6b66eb 100644
--- a/webview-ui/src/components/settings/ApiOptions.tsx
+++ b/webview-ui/src/components/settings/ApiOptions.tsx
@@ -37,10 +37,6 @@ import {
 	unboundDefaultModelInfo,
 	requestyDefaultModelId,
 	requestyDefaultModelInfo,
-	pearAiModels,
-	pearAiDefaultModelId,
-	pearAiDefaultModelInfo,
-	PEARAI_URL,
 	ApiProvider,
 } from "../../../../src/shared/api"
 import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"
@@ -60,6 +56,7 @@ import { validateApiConfiguration, validateModelId, validateBedrockArn } from "@
 import { ApiErrorMessage } from "./ApiErrorMessage"
 import { ThinkingBudget } from "./ThinkingBudget"
 import { usePearAiModels } from "../../hooks/usePearAiModels"
+import { allModels, pearAiDefaultModelId, pearAiDefaultModelInfo } from "../../../../src/shared/pearaiApi"
 
 interface ApiOptionsProps {
 	uriScheme: string | undefined
@@ -1693,10 +1690,8 @@ export function normalizeApiConfiguration(
 		}
 		case "pearai": {
 			// Always use the models from the hook which are fetched when provider is selected
-			return getProviderData(
-				pearAiModelsQuery || { [pearAiDefaultModelId]: pearAiDefaultModelInfo },
-				pearAiDefaultModelId,
-			)
+			let query = pearAiModelsQuery
+			return getProviderData(pearAiModelsQuery || {}, pearAiDefaultModelId)
 		}
 		default:
 			return getProviderData(anthropicModels, anthropicDefaultModelId)
diff --git a/webview-ui/src/components/settings/constants.ts b/webview-ui/src/components/settings/constants.ts
index 02d8bd52455..6da2a89651e 100644
--- a/webview-ui/src/components/settings/constants.ts
+++ b/webview-ui/src/components/settings/constants.ts
@@ -7,7 +7,6 @@
 import {
 	geminiModels,
 	mistralModels,
 	openAiNativeModels,
-	pearAiModels,
 	vertexModels,
 } from "../../../../src/shared/api"
diff --git a/webview-ui/src/context/ExtensionStateContext.tsx b/webview-ui/src/context/ExtensionStateContext.tsx
index 74fa58f6416..99be49b6cc2 100644
--- a/webview-ui/src/context/ExtensionStateContext.tsx
+++ b/webview-ui/src/context/ExtensionStateContext.tsx
@@ -12,9 +12,8 @@ import {
 	unboundDefaultModelInfo,
 	requestyDefaultModelId,
 	requestyDefaultModelInfo,
-	PEARAI_URL,
-	pearAiModels,
 } from "../../../src/shared/api"
+
 import { vscode } from "../utils/vscode"
 import { convertTextMateToHljs } from "../utils/textMateToHljs"
 import { findLastIndex } from "../../../src/shared/array"
@@ -24,6 +23,7 @@ import { Mode, CustomModePrompts, defaultModeSlug, defaultPrompts, ModeConfig }
 import { CustomSupportPrompts } from "../../../src/shared/support-prompt"
 import { experimentDefault, ExperimentId } from "../../../src/shared/experiments"
 import { TelemetrySetting } from "../../../src/shared/TelemetrySetting"
+import { PEARAI_URL, pearAiModels } from "../../../src/shared/pearaiApi"
 
 export interface ExtensionStateContextType extends ExtensionState {
 	didHydrateState: boolean
diff --git a/webview-ui/src/hooks/usePearAiModels.ts b/webview-ui/src/hooks/usePearAiModels.ts
index 7974f693597..f9e6fa8f665 100644
--- a/webview-ui/src/hooks/usePearAiModels.ts
+++ b/webview-ui/src/hooks/usePearAiModels.ts
@@ -1,5 +1,6 @@
 import { useState, useEffect } from "react"
-import { ModelInfo, pearAiDefaultModelId, pearAiDefaultModelInfo, PEARAI_URL } from "../../../src/shared/api"
"../../../src/shared/api" +import { ModelInfo } from "../../../src/shared/api" +import { pearAiDefaultModelId, pearAiDefaultModelInfo, PEARAI_URL } from "../../../src/shared/pearaiApi" import type { ApiConfiguration } from "../../../src/shared/api" export const usePearAiModels = (apiConfiguration?: ApiConfiguration) => {