Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions packages/types/src/global-settings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -262,6 +262,7 @@ export const SECRET_STATE_KEYS = [
"openAiNativeApiKey",
"deepSeekApiKey",
"moonshotApiKey",
"inceptionApiKey",
"mistralApiKey",
"minimaxApiKey",
"requestyApiKey",
Expand Down
15 changes: 15 additions & 0 deletions packages/types/src/provider-settings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import {
geminiModels,
mistralModels,
moonshotModels,
inceptionModels,
openAiCodexModels,
openAiNativeModels,
qwenCodeModels,
Expand Down Expand Up @@ -111,6 +112,7 @@ export const providerNames = [
"gemini-cli",
"mistral",
"moonshot",
"inception",
"minimax",
"openai-codex",
"openai-native",
Expand Down Expand Up @@ -313,6 +315,11 @@ const moonshotSchema = apiModelIdProviderModelSchema.extend({
moonshotApiKey: z.string().optional(),
})

const inceptionSchema = apiModelIdProviderModelSchema.extend({
inceptionBaseUrl: z.string().optional(),
inceptionApiKey: z.string().optional(),
})

const minimaxSchema = apiModelIdProviderModelSchema.extend({
minimaxBaseUrl: z
.union([z.literal("https://api.minimax.io/v1"), z.literal("https://api.minimaxi.com/v1")])
Expand Down Expand Up @@ -401,6 +408,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
mistralSchema.merge(z.object({ apiProvider: z.literal("mistral") })),
deepSeekSchema.merge(z.object({ apiProvider: z.literal("deepseek") })),
moonshotSchema.merge(z.object({ apiProvider: z.literal("moonshot") })),
inceptionSchema.merge(z.object({ apiProvider: z.literal("inception") })),
minimaxSchema.merge(z.object({ apiProvider: z.literal("minimax") })),
requestySchema.merge(z.object({ apiProvider: z.literal("requesty") })),
unboundSchema.merge(z.object({ apiProvider: z.literal("unbound") })),
Expand Down Expand Up @@ -434,6 +442,7 @@ export const providerSettingsSchema = z.object({
...mistralSchema.shape,
...deepSeekSchema.shape,
...moonshotSchema.shape,
...inceptionSchema.shape,
...minimaxSchema.shape,
...requestySchema.shape,
...unboundSchema.shape,
Expand Down Expand Up @@ -508,6 +517,7 @@ export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
"gemini-cli": "apiModelId",
mistral: "apiModelId",
moonshot: "apiModelId",
inception: "apiModelId",
minimax: "apiModelId",
deepseek: "apiModelId",
"qwen-code": "apiModelId",
Expand Down Expand Up @@ -595,6 +605,11 @@ export const MODELS_BY_PROVIDER: Record<
label: "Moonshot",
models: Object.keys(moonshotModels),
},
inception: {
id: "inception",
label: "Inception",
models: Object.keys(inceptionModels),
},
minimax: {
id: "minimax",
label: "MiniMax",
Expand Down
41 changes: 41 additions & 0 deletions packages/types/src/providers/inception.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import type { ModelInfo } from "../model.js"

// Model catalog for Inception Labs (https://docs.inceptionlabs.ai/get-started/models).
// Prices are USD per million tokens; both Mercury models share the same rates.
const MERCURY_INPUT_PRICE = 0.25
const MERCURY_OUTPUT_PRICE = 0.75

export const inceptionModels = {
	"mercury-2": {
		maxTokens: 10_000,
		contextWindow: 128_000,
		supportsImages: false,
		supportsPromptCache: true,
		inputPrice: MERCURY_INPUT_PRICE,
		outputPrice: MERCURY_OUTPUT_PRICE,
		cacheReadsPrice: 0.025,
		supportsTemperature: true,
		description: "Mercury 2: The fastest reasoning LLM and most powerful model for chat completions",
	},
	"mercury-edit": {
		maxTokens: 1_000,
		contextWindow: 32_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: MERCURY_INPUT_PRICE,
		outputPrice: MERCURY_OUTPUT_PRICE,
		supportsTemperature: true,
		description: "Mercury Edit: A code editing LLM for autocomplete (FIM), apply edit, and next edit suggestions",
	},
} as const satisfies Record<string, ModelInfo>

// Union of the catalog's keys, derived with keyof so it can never drift from the data.
export type InceptionModelId = keyof typeof inceptionModels

export const inceptionDefaultModelId: InceptionModelId = "mercury-2"

// Fallback metadata used when a configured model id is not in the catalog;
// mirrors "mercury-2" (minus the cache-read price and temperature flag).
export const inceptionModelInfoSaneDefaults: ModelInfo = {
	maxTokens: 10_000,
	contextWindow: 128_000,
	supportsImages: false,
	supportsPromptCache: true,
	inputPrice: MERCURY_INPUT_PRICE,
	outputPrice: MERCURY_OUTPUT_PRICE,
}

// Default sampling temperature applied when the user has not configured one.
export const INCEPTION_DEFAULT_TEMPERATURE = 0.7
4 changes: 4 additions & 0 deletions packages/types/src/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ export * from "./lite-llm.js"
export * from "./lm-studio.js"
export * from "./mistral.js"
export * from "./moonshot.js"
export * from "./inception.js"
export * from "./ollama.js"
export * from "./openai.js"
export * from "./openai-codex.js"
Expand All @@ -34,6 +35,7 @@ import { geminiDefaultModelId } from "./gemini.js"
import { litellmDefaultModelId } from "./lite-llm.js"
import { mistralDefaultModelId } from "./mistral.js"
import { moonshotDefaultModelId } from "./moonshot.js"
import { inceptionDefaultModelId } from "./inception.js"
import { openAiCodexDefaultModelId } from "./openai-codex.js"
import { openRouterDefaultModelId } from "./openrouter.js"
import { qwenCodeDefaultModelId } from "./qwen-code.js"
Expand Down Expand Up @@ -81,6 +83,8 @@ export function getProviderDefaultModelId(
return deepSeekDefaultModelId
case "moonshot":
return moonshotDefaultModelId
case "inception":
return inceptionDefaultModelId
case "minimax":
return minimaxDefaultModelId
case "zai":
Expand Down
3 changes: 3 additions & 0 deletions src/api/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ import {
OpenAiNativeHandler,
DeepSeekHandler,
MoonshotHandler,
InceptionHandler,
MistralHandler,
VsCodeLmHandler,
RequestyHandler,
Expand Down Expand Up @@ -146,6 +147,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
return new QwenCodeHandler(options)
case "moonshot":
return new MoonshotHandler(options)
case "inception":
return new InceptionHandler(options)
case "vscode-lm":
return new VsCodeLmHandler(options)
case "mistral":
Expand Down
166 changes: 166 additions & 0 deletions src/api/providers/__tests__/inception.spec.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
// Mocks must come first, before imports: vitest hoists vi.mock() calls above
// the import statements, so this factory replaces the "openai" module before
// InceptionHandler (which instantiates the OpenAI client) is ever loaded.
const mockCreate = vi.fn()
vi.mock("openai", () => {
	return {
		__esModule: true,
		default: vi.fn().mockImplementation(() => ({
			chat: {
				completions: {
					create: mockCreate.mockImplementation(async (options) => {
						// Non-streaming path: a single completed response.
						if (!options.stream) {
							return {
								id: "test-completion",
								choices: [
									{
										message: { role: "assistant", content: "Test response", refusal: null },
										finish_reason: "stop",
										index: 0,
									},
								],
								usage: {
									prompt_tokens: 10,
									completion_tokens: 5,
									total_tokens: 15,
									cached_tokens: 2,
								},
							}
						}

						// Streaming path: an async iterator yielding one content delta
						// followed by a final usage-only chunk, matching the OpenAI
						// streaming wire shape.
						return {
							[Symbol.asyncIterator]: async function* () {
								yield {
									choices: [
										{
											delta: { content: "Test response" },
											index: 0,
										},
									],
									usage: null,
								}
								yield {
									choices: [
										{
											delta: {},
											index: 0,
										},
									],
									usage: {
										prompt_tokens: 10,
										completion_tokens: 5,
										total_tokens: 15,
										cached_tokens: 2,
									},
								}
							},
						}
					}),
				},
			},
		})),
	}
})

import OpenAI from "openai"
import type { Anthropic } from "@anthropic-ai/sdk"

import { inceptionDefaultModelId } from "@roo-code/types"

import type { ApiHandlerOptions } from "../../../shared/api"
import { InceptionHandler } from "../inception"

describe("InceptionHandler", () => {
	let handler: InceptionHandler
	let options: ApiHandlerOptions

	beforeEach(() => {
		vi.clearAllMocks()
		options = {
			inceptionApiKey: "test-api-key",
			apiModelId: inceptionDefaultModelId,
		}
		handler = new InceptionHandler(options)
	})

	describe("constructor", () => {
		it("should initialize with default model", () => {
			const model = handler.getModel()
			expect(model.id).toBe(inceptionDefaultModelId)
		})

		it("should use custom base URL if provided", () => {
			const customOptions = {
				...options,
				inceptionBaseUrl: "https://custom.api.url/v1",
			}
			const customHandler = new InceptionHandler(customOptions)
			expect(customHandler).toBeDefined()
		})
	})

	describe("getModel", () => {
		it("should return model info for mercury-2", () => {
			const model = handler.getModel()
			expect(model.id).toBe("mercury-2")
			expect(model.info.maxTokens).toBe(10_000)
			expect(model.info.contextWindow).toBe(128_000)
		})

		it("should return model info for mercury-edit", () => {
			const editOptions = {
				...options,
				apiModelId: "mercury-edit",
			}
			const editHandler = new InceptionHandler(editOptions)
			const model = editHandler.getModel()
			expect(model.id).toBe("mercury-edit")
			expect(model.info.maxTokens).toBe(1_000)
			expect(model.info.contextWindow).toBe(32_000)
		})
	})

	describe("createMessage", () => {
		it("should create a message and yield text chunks", async () => {
			const systemPrompt = "You are a helpful assistant"
			const messages: Anthropic.MessageParam[] = [
				{
					role: "user",
					content: "Hello",
				},
			]

			const stream = handler.createMessage(systemPrompt, messages)
			const chunks: string[] = []

			for await (const chunk of stream) {
				if (chunk.type === "text") {
					chunks.push(chunk.text)
				}
			}

			expect(mockCreate).toHaveBeenCalled()
			expect(chunks.join("")).toContain("Test response")
		})

		it("should yield usage metrics after the stream completes", async () => {
			// NOTE(review): this test was previously a byte-for-byte duplicate of
			// the one above and added no coverage. It now verifies the handler
			// surfaces the mock's final usage-only chunk instead of dropping it.
			const systemPrompt = "You are a helpful assistant"
			const messages: Anthropic.MessageParam[] = [
				{
					role: "user",
					content: "Hello",
				},
			]

			const stream = handler.createMessage(systemPrompt, messages)
			const textChunks: string[] = []
			const usageChunks: unknown[] = []

			for await (const chunk of stream) {
				if (chunk.type === "text") {
					textChunks.push(chunk.text)
				} else if (chunk.type === "usage") {
					usageChunks.push(chunk)
				}
			}

			expect(textChunks).toContain("Test response")
			expect(usageChunks.length).toBeGreaterThan(0)
		})
	})
})
38 changes: 38 additions & 0 deletions src/api/providers/inception.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
import { inceptionModels, inceptionDefaultModelId, inceptionModelInfoSaneDefaults, INCEPTION_DEFAULT_TEMPERATURE, type ModelInfo } from "@roo-code/types"
import type { ApiHandlerOptions } from "../../shared/api"
import { getModelParams } from "../transform/model-params"
import { OpenAICompatibleHandler, OpenAICompatibleConfig } from "./openai-compatible"

/**
 * Resolve a requested model id against the static Inception catalog, falling
 * back to the default id when none is configured and to sane default metadata
 * for unrecognized ids. Shared by the constructor and getModel() so the two
 * code paths can never disagree (previously this logic was duplicated).
 */
function resolveInceptionModel(apiModelId: string | undefined) {
	const id = apiModelId ?? inceptionDefaultModelId
	const info = inceptionModels[id as keyof typeof inceptionModels] ?? inceptionModelInfoSaneDefaults
	return { id, info }
}

/**
 * Handler for Inception Labs' OpenAI-compatible chat completions API.
 *
 * Thin wrapper around OpenAICompatibleHandler: it resolves the configured
 * model against the `inceptionModels` catalog and points the client at the
 * Inception endpoint (or a caller-supplied base-URL override).
 */
export class InceptionHandler extends OpenAICompatibleHandler {
	constructor(options: ApiHandlerOptions) {
		const { id: modelId, info: modelInfo } = resolveInceptionModel(options.apiModelId)

		const config: OpenAICompatibleConfig = {
			providerName: "inception",
			// `||` (not `??`) so an empty-string override also falls back to the default URL.
			baseURL: options.inceptionBaseUrl || "https://api.inceptionlabs.ai/v1",
			// The OpenAI SDK requires a non-empty key; a missing key fails server-side instead.
			apiKey: options.inceptionApiKey ?? "not-provided",
			modelId,
			modelInfo,
			// `?? undefined` normalizes a possible null from settings to undefined.
			modelMaxTokens: options.modelMaxTokens ?? undefined,
			temperature: options.modelTemperature ?? undefined,
		}

		super(options, config)
	}

	/**
	 * Returns the active model id and info plus derived request parameters
	 * (max tokens, temperature defaulting to INCEPTION_DEFAULT_TEMPERATURE).
	 */
	override getModel() {
		const { id, info } = resolveInceptionModel(this.options.apiModelId)

		const params = getModelParams({
			format: "openai",
			modelId: id,
			model: info,
			settings: this.options,
			defaultTemperature: INCEPTION_DEFAULT_TEMPERATURE,
		})

		return { id, info, ...params }
	}
}
1 change: 1 addition & 0 deletions src/api/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ export { AnthropicHandler } from "./anthropic"
export { AwsBedrockHandler } from "./bedrock"
export { DeepSeekHandler } from "./deepseek"
export { MoonshotHandler } from "./moonshot"
export { InceptionHandler } from "./inception"
export { FakeAIHandler } from "./fake-ai"
export { GeminiHandler } from "./gemini"
export { LiteLLMHandler } from "./lite-llm"
Expand Down
1 change: 1 addition & 0 deletions src/shared/__tests__/checkExistApiConfig.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ describe("checkExistKey", () => {
openAiNativeApiKey: undefined,
deepSeekApiKey: undefined,
moonshotApiKey: undefined,
inceptionApiKey: undefined,
mistralApiKey: undefined,
vsCodeLmModelSelector: undefined,
requestyApiKey: undefined,
Expand Down
Loading
Loading