Skip to content

Commit d34dd91

Browse files
Merge pull request #120 from VirdocsSoftware/develop
feat: 🎸 update model and token size (#119)
2 parents 41edf77 + 817ce87 commit d34dd91

4 files changed

Lines changed: 14 additions & 6 deletions

File tree

.github/actions/auto-pr-description/generate_pr_description.js

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -107,7 +107,7 @@ ${diffContent}`;
107107
* Call Gemini API with the given prompt
108108
*/
109109
async function callGeminiAPI(prompt, apiKey) {
110-
const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key=${apiKey}`, {
110+
const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-exp:generateContent?key=${apiKey}`, {
111111
method: 'POST',
112112
headers: { 'Content-Type': 'application/json' },
113113
body: JSON.stringify({
@@ -120,7 +120,7 @@ async function callGeminiAPI(prompt, apiKey) {
120120
temperature: 0.7,
121121
topK: 40,
122122
topP: 0.95,
123-
maxOutputTokens: 2048,
123+
maxOutputTokens: 8192,
124124
}
125125
})
126126
});

.github/actions/auto-release-description/generate_pr_description.js

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -43,7 +43,7 @@ Keep it concise and focused on the most important changes.`;
4343
const combinedPrompt = `${promptTemplate}\n\nHere is the git diff:\n\n${diffContent}`;
4444

4545
try {
46-
const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key=${apiKey}`, {
46+
const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-exp:generateContent?key=${apiKey}`, {
4747
method: 'POST',
4848
headers: { 'Content-Type': 'application/json' },
4949
body: JSON.stringify({
@@ -56,7 +56,7 @@ Keep it concise and focused on the most important changes.`;
5656
temperature: 0.7,
5757
topK: 40,
5858
topP: 0.95,
59-
maxOutputTokens: 1024,
59+
maxOutputTokens: 8192,
6060
}
6161
})
6262
});

.github/actions/confluence-release-notes/generate_release_notes.js

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -205,7 +205,7 @@ class GeminiClient {
205205
const command =
206206
'Categorize the following as either Improvement or Feature and output to json using schema \'{"categories":[{"ticket_id":"$key","category":"$category"}]}\'';
207207
const url =
208-
"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash-8b:generateContent?key=" +
208+
"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-exp:generateContent?key=" +
209209
this.api_key;
210210
const body = JSON.stringify({
211211
contents: [
@@ -217,6 +217,9 @@ class GeminiClient {
217217
],
218218
},
219219
],
220+
generationConfig: {
221+
maxOutputTokens: 8192,
222+
},
220223
});
221224
const response = await this.fetch(url, {
222225
method: "POST",

.github/actions/llm/invoke_gemini.js

Lines changed: 6 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,11 @@
11
import { GoogleGenerativeAI } from "@google/generative-ai";
22
const genAI = new GoogleGenerativeAI(process.env.API_KEY);
3-
const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
3+
const model = genAI.getGenerativeModel({
4+
model: "gemini-2.0-flash-exp",
5+
generationConfig: {
6+
maxOutputTokens: 8192,
7+
}
8+
});
49
const prompt = process.env.LLM_PROMPT;
510

611
try {

0 commit comments

Comments (0)