1 change: 1 addition & 0 deletions docs/en_US/release_notes_9_14.rst
@@ -30,4 +30,5 @@ Bug fixes
 
 | `Issue #9279 <https://github.com/pgadmin-org/pgadmin4/issues/9279>`_ - Fixed an issue where OAuth2 authentication fails with 'object has no attribute' if OAUTH2_AUTO_CREATE_USER is False.
 | `Issue #9392 <https://github.com/pgadmin-org/pgadmin4/issues/9392>`_ - Ensure that the Geometry Viewer refreshes when re-running queries or switching geometry columns, preventing stale data from being displayed.
+| `Issue #9719 <https://github.com/pgadmin-org/pgadmin4/issues/9719>`_ - Fixed an issue where AI Reports fail with OpenAI models that do not support the temperature parameter.
 | `Issue #9721 <https://github.com/pgadmin-org/pgadmin4/issues/9721>`_ - Fixed an issue where permissions page is not completely accessible on full scroll.
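For context, the failure behind issue #9719 can be reproduced against the OpenAI API directly; a minimal sketch using the standard openai Python package (model name illustrative, and newer reasoning-style models accept only their default temperature):

from openai import OpenAI

client = OpenAI()
try:
    client.chat.completions.create(
        model="o3-mini",  # illustrative; such models reject non-default temperature
        messages=[{"role": "user", "content": "Summarize this report."}],
        temperature=0.0,
    )
except Exception as exc:  # surfaces as a BadRequestError (HTTP 400) in practice
    print(exc)  # e.g. "Unsupported value: 'temperature' does not support 0.0 ..."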
2 changes: 0 additions & 2 deletions web/pgadmin/llm/client.py
@@ -54,7 +54,6 @@ def chat(
         tools: Optional[list[Tool]] = None,
         system_prompt: Optional[str] = None,
         max_tokens: int = 4096,
-        temperature: float = 0.0,
         **kwargs
     ) -> LLMResponse:
         """
@@ -65,7 +64,6 @@ def chat(
             tools: Optional list of tools the LLM can use.
             system_prompt: Optional system prompt to set context.
             max_tokens: Maximum tokens in the response.
-            temperature: Sampling temperature (0.0 = deterministic).
             **kwargs: Additional provider-specific parameters.
 
         Returns:
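With the parameter gone from the shared interface, a call site might look like the sketch below. The import path and factory helper are assumptions for illustration; Message.user appears in the pipeline code in this PR. Providers that do support sampling controls could still receive them through **kwargs:

from pgadmin.llm.client import Message, get_llm_client  # names/path assumed

client = get_llm_client()  # hypothetical factory for the configured provider
response = client.chat(
    messages=[Message.user('Explain this query plan.')],
    system_prompt='You are a PostgreSQL performance expert.',
    max_tokens=1024,
    # temperature intentionally omitted; pass it via **kwargs only if the
    # configured provider/model is known to accept it
)
print(response.content)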
5 changes: 0 additions & 5 deletions web/pgadmin/llm/providers/anthropic.py
@@ -77,7 +77,6 @@ def chat(
         tools: Optional[list[Tool]] = None,
         system_prompt: Optional[str] = None,
         max_tokens: int = 4096,
-        temperature: float = 0.0,
         **kwargs
     ) -> LLMResponse:
         """
@@ -88,7 +87,6 @@ def chat(
             tools: Optional list of tools Claude can use.
             system_prompt: Optional system prompt.
             max_tokens: Maximum tokens in response.
-            temperature: Sampling temperature.
             **kwargs: Additional parameters.
 
         Returns:
@@ -107,9 +105,6 @@ def chat(
         if system_prompt:
             payload['system'] = system_prompt
 
-        if temperature > 0:
-            payload['temperature'] = temperature
-
         if tools:
             payload['tools'] = self._convert_tools(tools)
 
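Note that the removed guard only sent temperature when it was positive, so with the 0.0 default the Anthropic payload is unchanged by this PR. If per-provider sampling controls are ever wanted again, one option (a sketch under that assumption, not the pgAdmin implementation) is to forward a whitelist of extra kwargs when building the payload:

ALLOWED_EXTRAS = {'temperature', 'top_p', 'top_k'}  # hypothetical whitelist

def build_payload(model, messages, max_tokens, **kwargs):
    """Build a chat payload, forwarding only known-safe extra parameters."""
    payload = {'model': model, 'messages': messages, 'max_tokens': max_tokens}
    payload.update({k: v for k, v in kwargs.items() if k in ALLOWED_EXTRAS})
    return payload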
3 changes: 0 additions & 3 deletions web/pgadmin/llm/providers/docker.py
@@ -83,7 +83,6 @@ def chat(
         tools: Optional[list[Tool]] = None,
         system_prompt: Optional[str] = None,
         max_tokens: int = 4096,
-        temperature: float = 0.0,
         **kwargs
     ) -> LLMResponse:
         """
@@ -94,7 +93,6 @@ def chat(
             tools: Optional list of tools the model can use.
             system_prompt: Optional system prompt.
             max_tokens: Maximum tokens in response.
-            temperature: Sampling temperature.
             **kwargs: Additional parameters.
 
         Returns:
@@ -117,7 +115,6 @@ def chat(
             'model': self._model,
             'messages': converted_messages,
             'max_completion_tokens': max_tokens,
-            'temperature': temperature
         }
 
         if tools:
3 changes: 0 additions & 3 deletions web/pgadmin/llm/providers/ollama.py
@@ -81,7 +81,6 @@ def chat(
         tools: Optional[list[Tool]] = None,
         system_prompt: Optional[str] = None,
         max_tokens: int = 4096,
-        temperature: float = 0.0,
         **kwargs
     ) -> LLMResponse:
         """
@@ -92,7 +91,6 @@ def chat(
             tools: Optional list of tools the model can use.
             system_prompt: Optional system prompt.
             max_tokens: Maximum tokens in response (num_predict in Ollama).
-            temperature: Sampling temperature.
             **kwargs: Additional parameters.
 
         Returns:
@@ -117,7 +115,6 @@ def chat(
             'stream': False,
             'options': {
                 'num_predict': max_tokens,
-                'temperature': temperature
             }
         }
 
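After the change, the Ollama request body nests only num_predict (Ollama's name for the output-token cap) under options. Roughly, with an illustrative model name and the surrounding code assumed:

converted_messages = [{'role': 'user', 'content': 'Analyze this schema.'}]
payload = {
    'model': 'llama3',          # illustrative model name
    'messages': converted_messages,
    'stream': False,
    'options': {
        'num_predict': 1024,    # maps from the chat() max_tokens argument
        # 'temperature' is no longer set; Ollama applies its own default
    },
}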
3 changes: 0 additions & 3 deletions web/pgadmin/llm/providers/openai.py
@@ -77,7 +77,6 @@ def chat(
         tools: Optional[list[Tool]] = None,
         system_prompt: Optional[str] = None,
         max_tokens: int = 4096,
-        temperature: float = 0.0,
         **kwargs
     ) -> LLMResponse:
         """
@@ -88,7 +87,6 @@ def chat(
             tools: Optional list of tools the model can use.
             system_prompt: Optional system prompt.
             max_tokens: Maximum tokens in response.
-            temperature: Sampling temperature.
             **kwargs: Additional parameters.
 
         Returns:
@@ -111,7 +109,6 @@ def chat(
             'model': self._model,
             'messages': converted_messages,
             'max_completion_tokens': max_tokens,
-            'temperature': temperature
         }
 
         if tools:
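The OpenAI provider already sends max_completion_tokens, the field newer OpenAI models require in place of the deprecated max_tokens; dropping temperature as well makes the payload safe for models that accept only their default sampling settings. A rough sketch of the resulting body (values illustrative):

payload = {
    'model': 'gpt-5-mini',      # illustrative model name
    'messages': [{'role': 'user', 'content': 'Analyze this schema.'}],
    'max_completion_tokens': 1024,
    # no 'temperature' key: models that reject the parameter now succeed
}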
16 changes: 5 additions & 11 deletions web/pgadmin/llm/reports/pipeline.py
@@ -218,8 +218,7 @@ def _planning_stage(self, context: dict) -> list[str]:
         response = self._call_llm_with_retry(
             messages=[Message.user(user_prompt)],
             system_prompt=PLANNING_SYSTEM_PROMPT,
-            max_tokens=500,
-            temperature=0.0
+            max_tokens=500
         )
 
         # Parse JSON response
@@ -292,8 +291,7 @@ def _analyze_section_with_retry(
         response = self.client.chat(
             messages=[Message.user(user_prompt)],
             system_prompt=SECTION_ANALYSIS_SYSTEM_PROMPT,
-            max_tokens=1500,
-            temperature=0.3
+            max_tokens=1500
         )
 
         # Determine severity from content
@@ -374,8 +372,7 @@ def _synthesize_with_retry(
         response = self.client.chat(
             messages=[Message.user(user_prompt)],
             system_prompt=SYNTHESIS_SYSTEM_PROMPT,
-            max_tokens=4096,
-            temperature=0.3
+            max_tokens=4096
         )
 
         yield {'type': 'result', 'result': response.content}
@@ -408,16 +405,14 @@ def _call_llm_with_retry(
         self,
         messages: list[Message],
         system_prompt: str,
-        max_tokens: int = 4096,
-        temperature: float = 0.3
+        max_tokens: int = 4096
     ):
         """Call LLM with exponential backoff retry.
 
         Args:
             messages: Messages to send.
             system_prompt: System prompt.
             max_tokens: Maximum response tokens.
-            temperature: Sampling temperature.
 
         Returns:
             LLMResponse from the client.
@@ -430,8 +425,7 @@ def _call_llm_with_retry(
                 return self.client.chat(
                     messages=messages,
                     system_prompt=system_prompt,
-                    max_tokens=max_tokens,
-                    temperature=temperature
+                    max_tokens=max_tokens
                 )
             except LLMClientError as e:
                 if e.error.retryable and attempt < self.max_retries - 1:
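For reference, _call_llm_with_retry follows the standard exponential-backoff pattern visible in the hunks above. A self-contained sketch, with the delay schedule and import path assumed rather than taken from the source:

import time

from pgadmin.llm.client import LLMClientError  # import path assumed

def call_with_retry(client, messages, system_prompt, max_tokens=4096,
                    max_retries=3):
    """Retry retryable LLM failures with exponentially growing delays."""
    for attempt in range(max_retries):
        try:
            return client.chat(messages=messages,
                               system_prompt=system_prompt,
                               max_tokens=max_tokens)
        except LLMClientError as e:
            if e.error.retryable and attempt < max_retries - 1:
                time.sleep(2 ** attempt)  # 1s, 2s, 4s, ... (schedule assumed)
                continue
            raise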