diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 000000000..c4796f5b8
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,14 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.{js,jsx,ts,tsx,json}]
+indent_style = space
+indent_size = 2
+
+[*.md]
+trim_trailing_whitespace = false
\ No newline at end of file
diff --git a/.github/workflows/docker-deploy.yml b/.github/workflows/docker-deploy.yml
index 82107df0c..9d04c8913 100644
--- a/.github/workflows/docker-deploy.yml
+++ b/.github/workflows/docker-deploy.yml
@@ -15,6 +15,11 @@ on:
description: 'runner array in json format (e.g. ["ubuntu-latest"] or ["self-hosted"])'
required: true
default: '[]'
+ app_version:
+ description: 'Docker image tag to build and deploy (e.g. v1.7.1)'
+ required: true
+ default: 'latest'
+ type: string
jobs:
build-main:
@@ -23,7 +28,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Build main application image
- run: docker build --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua -t nexent/nexent -f make/main/Dockerfile .
+ run: docker build --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua -t nexent/nexent:${{ github.event.inputs.app_version }} -t nexent/nexent -f make/main/Dockerfile .
build-data-process:
runs-on: ${{ fromJson(inputs.runner_label_json) }}
@@ -47,7 +52,7 @@ jobs:
GIT_TRACE=1 GIT_CURL_VERBOSE=1 GIT_LFS_LOG=debug git lfs pull
rm -rf .git .gitattributes
- name: Build data process image
- run: docker build --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua -t nexent/nexent-data-process -f make/data_process/Dockerfile .
+ run: docker build --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua -t nexent/nexent-data-process:${{ github.event.inputs.app_version }} -t nexent/nexent-data-process -f make/data_process/Dockerfile .
build-web:
runs-on: ${{ fromJson(inputs.runner_label_json) }}
@@ -55,7 +60,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Build web frontend image
- run: docker build --build-arg MIRROR=https://registry.npmmirror.com --build-arg APK_MIRROR=tsinghua -t nexent/nexent-web -f make/web/Dockerfile .
+ run: docker build --build-arg MIRROR=https://registry.npmmirror.com --build-arg APK_MIRROR=tsinghua -t nexent/nexent-web:${{ github.event.inputs.app_version }} -t nexent/nexent-web -f make/web/Dockerfile .
build-docs:
runs-on: ${{ fromJson(inputs.runner_label_json) }}
@@ -63,7 +68,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Build docs image
- run: docker build --progress=plain -t nexent/nexent-docs -f make/docs/Dockerfile .
+ run: docker build --progress=plain -t nexent/nexent-docs:${{ github.event.inputs.app_version }} -t nexent/nexent-docs -f make/docs/Dockerfile .
deploy:
runs-on: ${{ fromJson(inputs.runner_label_json) }}
@@ -76,6 +81,9 @@ jobs:
rm -rf $HOME/nexent
mkdir -p $HOME/nexent
cp -r $GITHUB_WORKSPACE/* $HOME/nexent/
+      - name: Override APP_VERSION with workflow input in deploy.sh (CI only)
+ run: |
+ sed -i 's/APP_VERSION="$(get_app_version)"/APP_VERSION="${{ github.event.inputs.app_version }}"/' $HOME/nexent/docker/deploy.sh
- name: Start docs container
run: |
docker stop nexent-docs 2>/dev/null || true
diff --git a/backend/config_service.py b/backend/config_service.py
index f98c7b155..ebe5b7593 100644
--- a/backend/config_service.py
+++ b/backend/config_service.py
@@ -1,7 +1,6 @@
import uvicorn
import logging
import warnings
-import asyncio
from consts.const import APP_VERSION
@@ -12,30 +11,14 @@
from apps.config_app import app
from utils.logging_utils import configure_logging, configure_elasticsearch_logging
-from services.tool_configuration_service import initialize_tools_on_startup
+
configure_logging(logging.INFO)
configure_elasticsearch_logging()
logger = logging.getLogger("config_service")
-async def startup_initialization():
- """
- Perform initialization tasks during server startup
- """
+if __name__ == "__main__":
logger.info("Starting server initialization...")
logger.info(f"APP version is: {APP_VERSION}")
- try:
- # Initialize tools on startup - service layer handles detailed logging
- await initialize_tools_on_startup()
- logger.info("Server initialization completed successfully!")
-
- except Exception as e:
- logger.error(f"Server initialization failed: {str(e)}")
- # Don't raise the exception to allow server to start even if initialization fails
- logger.warning("Server will continue to start despite initialization issues")
-
-
-if __name__ == "__main__":
- asyncio.run(startup_initialization())
uvicorn.run(app, host="0.0.0.0", port=5010, log_level="info")
diff --git a/backend/consts/const.py b/backend/consts/const.py
index 0b5f4bcef..10d977219 100644
--- a/backend/consts/const.py
+++ b/backend/consts/const.py
@@ -285,4 +285,4 @@ class VectorDatabaseType(str, Enum):
# APP Version
-APP_VERSION = "v1.7.8"
+APP_VERSION = "v1.7.8.1"
diff --git a/backend/runtime_service.py b/backend/runtime_service.py
index faa3d2981..9fd42d9a7 100644
--- a/backend/runtime_service.py
+++ b/backend/runtime_service.py
@@ -1,7 +1,6 @@
import uvicorn
import logging
import warnings
-import asyncio
from consts.const import APP_VERSION
@@ -12,31 +11,16 @@
from apps.runtime_app import app
from utils.logging_utils import configure_logging, configure_elasticsearch_logging
-from services.tool_configuration_service import initialize_tools_on_startup
+
configure_logging(logging.INFO)
configure_elasticsearch_logging()
logger = logging.getLogger("runtime_service")
-async def startup_initialization():
- """
- Perform initialization tasks during server startup
- """
+if __name__ == "__main__":
logger.info("Starting server initialization...")
logger.info(f"APP version is: {APP_VERSION}")
- try:
- # Initialize tools on startup - service layer handles detailed logging
- await initialize_tools_on_startup()
- logger.info("Server initialization completed successfully!")
- except Exception as e:
- logger.error(f"Server initialization failed: {str(e)}")
- # Don't raise the exception to allow server to start even if initialization fails
- logger.warning("Server will continue to start despite initialization issues")
-
-
-if __name__ == "__main__":
- asyncio.run(startup_initialization())
uvicorn.run(app, host="0.0.0.0", port=5014, log_level="info")
diff --git a/backend/services/tool_configuration_service.py b/backend/services/tool_configuration_service.py
index 66298c8c5..97a386a29 100644
--- a/backend/services/tool_configuration_service.py
+++ b/backend/services/tool_configuration_service.py
@@ -11,7 +11,7 @@
import jsonref
from mcpadapt.smolagents_adapter import _sanitize_function_name
-from consts.const import DEFAULT_USER_ID, LOCAL_MCP_SERVER, DATA_PROCESS_SERVICE
+from consts.const import LOCAL_MCP_SERVER, DATA_PROCESS_SERVICE
from consts.exceptions import MCPConnectionError, ToolExecutionException, NotFoundException
from consts.model import ToolInstanceInfoRequest, ToolInfo, ToolSourceEnum, ToolValidateRequest
from database.remote_mcp_db import get_mcp_records_by_tenant, get_mcp_server_by_name_and_tenant
@@ -22,7 +22,6 @@
update_tool_table_from_scan_tool_list,
search_last_tool_instance_by_tool_id,
)
-from database.user_tenant_db import get_all_tenant_ids
from services.file_management_service import get_llm_model
from services.vectordatabase_service import get_embedding_model, get_vector_db_core
from services.tenant_config_service import get_selected_knowledge_list, build_knowledge_name_mapping
@@ -367,71 +366,6 @@ async def list_all_tools(tenant_id: str):
return formatted_tools
-async def initialize_tools_on_startup():
- """
- Initialize and scan all tools during server startup for all tenants
-
- This function scans all available tools (local, LangChain, and MCP)
- and updates the database with the latest tool information for all tenants.
- """
-
- logger.info("Starting tool initialization on server startup...")
-
- try:
- # Get all tenant IDs from the database
- tenant_ids = get_all_tenant_ids()
-
- if not tenant_ids:
- logger.warning("No tenants found in database, skipping tool initialization")
- return
-
- logger.info(f"Found {len(tenant_ids)} tenants: {tenant_ids}")
-
- total_tools = 0
- successful_tenants = 0
- failed_tenants = []
-
- # Process each tenant
- for tenant_id in tenant_ids:
- try:
- logger.info(f"Initializing tools for tenant: {tenant_id}")
-
- # Add timeout to prevent hanging during startup
- try:
- await asyncio.wait_for(
- update_tool_list(tenant_id=tenant_id, user_id=DEFAULT_USER_ID),
- timeout=60.0 # 60 seconds timeout per tenant
- )
-
- # Get the count of tools for this tenant
- tools_info = query_all_tools(tenant_id)
- tenant_tool_count = len(tools_info)
- total_tools += tenant_tool_count
- successful_tenants += 1
-
- logger.info(f"Tenant {tenant_id}: {tenant_tool_count} tools initialized")
-
- except asyncio.TimeoutError:
- logger.error(f"Tool initialization timed out for tenant {tenant_id}")
- failed_tenants.append(f"{tenant_id} (timeout)")
-
- except Exception as e:
- logger.error(f"Tool initialization failed for tenant {tenant_id}: {str(e)}")
- failed_tenants.append(f"{tenant_id} (error: {str(e)})")
-
- # Log final results
- logger.info("Tool initialization completed!")
- logger.info(f"Total tools available across all tenants: {total_tools}")
- logger.info(f"Successfully processed: {successful_tenants}/{len(tenant_ids)} tenants")
-
- if failed_tenants:
- logger.warning(f"Failed tenants: {', '.join(failed_tenants)}")
-
- except Exception as e:
- logger.error(f"❌ Tool initialization failed: {str(e)}")
- raise
-
-
def load_last_tool_config_impl(tool_id: int, tenant_id: str, user_id: str):
"""
Load the last tool configuration for a given tool ID
diff --git a/backend/utils/llm_utils.py b/backend/utils/llm_utils.py
index 2e1590498..e74ef91b3 100644
--- a/backend/utils/llm_utils.py
+++ b/backend/utils/llm_utils.py
@@ -18,16 +18,37 @@ def _process_thinking_tokens(
) -> bool:
"""
Process tokens to filter out thinking content between and tags.
+ Handles cases where providers only send a closing tag or mix reasoning_content.
"""
- if is_thinking:
- return THINK_END_PATTERN not in new_token
+ # Check for end tag first, as it might appear in the same token as start tag
+ if THINK_END_PATTERN in new_token:
+ # If we were never in think mode, treat everything accumulated so far as reasoning and clear it
+ if not is_thinking:
+ token_join.clear()
+ if callback:
+ callback("") # clear any previously streamed reasoning content
+
+ # Exit thinking mode and only keep content after
+ _, _, after_end = new_token.partition(THINK_END_PATTERN)
+ is_thinking = False
+ new_token = after_end
+ # Continue processing the remaining content in this token
+ # Check for start tag (after processing end tag, in case both are in the same token)
if THINK_START_PATTERN in new_token:
+ # Drop any content before and switch to thinking mode
+ _, _, after_start = new_token.partition(THINK_START_PATTERN)
+ new_token = after_start
+ is_thinking = True
+
+ if is_thinking:
+ # Still inside thinking content; ignore until we exit
return True
- token_join.append(new_token)
- if callback:
- callback("".join(token_join))
+ if new_token:
+ token_join.append(new_token)
+ if callback:
+ callback("".join(token_join))
return False
@@ -46,8 +67,8 @@ def call_llm_for_system_prompt(
llm = OpenAIModel(
model_id=get_model_name_from_config(llm_model_config) if llm_model_config else "",
- api_base=llm_model_config.get("base_url", ""),
- api_key=llm_model_config.get("api_key", ""),
+ api_base=llm_model_config.get("base_url", "") if llm_model_config else "",
+ api_key=llm_model_config.get("api_key", "") if llm_model_config else "",
temperature=0.3,
top_p=0.95,
)
@@ -65,16 +86,38 @@ def call_llm_for_system_prompt(
current_request = llm.client.chat.completions.create(stream=True, **completion_kwargs)
token_join: List[str] = []
is_thinking = False
+ reasoning_content_seen = False
+ content_tokens_seen = 0
for chunk in current_request:
- new_token = chunk.choices[0].delta.content
+ delta = chunk.choices[0].delta
+ reasoning_content = getattr(delta, "reasoning_content", None)
+ new_token = delta.content
+
+ # Note: reasoning_content is separate metadata and doesn't affect content filtering
+ # We only filter content based on tags in delta.content
+ if reasoning_content:
+ reasoning_content_seen = True
+ logger.debug("Received reasoning_content (metadata only, not filtering content)")
+
+ # Process content token if it exists
if new_token is not None:
+ content_tokens_seen += 1
is_thinking = _process_thinking_tokens(
new_token,
is_thinking,
token_join,
callback,
)
- return "".join(token_join)
+
+ result = "".join(token_join)
+ if not result and content_tokens_seen > 0:
+ logger.warning(
+ "Generated prompt is empty but %d content tokens were processed. "
+ "This suggests all content was filtered out.",
+ content_tokens_seen
+ )
+
+ return result
except Exception as exc:
logger.error("Failed to generate prompt from LLM: %s", str(exc))
raise
diff --git a/doc/docs/.vitepress/config.mts b/doc/docs/.vitepress/config.mts
index 059937f09..884fa28bb 100644
--- a/doc/docs/.vitepress/config.mts
+++ b/doc/docs/.vitepress/config.mts
@@ -1,4 +1,4 @@
-// https://vitepress.dev/reference/site-config
+// https://vitepress.dev/reference/site-config
import { defineConfig } from "vitepress";
export default defineConfig({
@@ -89,6 +89,8 @@ export default defineConfig({
text: "Knowledge Base",
link: "/en/user-guide/knowledge-base",
},
+ { text: "MCP Tools", link: "/en/user-guide/mcp-tools" },
+ { text: "Monitoring & Ops", link: "/en/user-guide/monitor" },
{
text: "Model Management",
link: "/en/user-guide/model-management",
@@ -267,7 +269,7 @@ export default defineConfig({
items: [
{ text: "首页", link: "/zh/user-guide/home-page" },
{ text: "开始问答", link: "/zh/user-guide/start-chat" },
- { text: "快速设置", link: "/zh/user-guide/quick-setup" },
+ { text: "快速配置", link: "/zh/user-guide/quick-setup" },
{ text: "智能体空间", link: "/zh/user-guide/agent-space" },
{ text: "智能体市场", link: "/zh/user-guide/agent-market" },
{
@@ -278,6 +280,8 @@ export default defineConfig({
text: "知识库",
link: "/zh/user-guide/knowledge-base",
},
+ { text: "MCP工具", link: "/zh/user-guide/mcp-tools" },
+ { text: "监控与运维", link: "/zh/user-guide/monitor" },
{ text: "模型管理", link: "/zh/user-guide/model-management" },
{ text: "记忆管理", link: "/zh/user-guide/memory-management" },
{ text: "用户管理", link: "/zh/user-guide/user-management" },
diff --git a/doc/docs/en/user-guide/agent-development.md b/doc/docs/en/user-guide/agent-development.md
index bc7d6c42d..9f2f633fa 100644
--- a/doc/docs/en/user-guide/agent-development.md
+++ b/doc/docs/en/user-guide/agent-development.md
@@ -19,6 +19,8 @@ If you have an existing agent configuration, you can also import it:
> - **Import anyway**: Keep the duplicate name; the imported agent will be in an unavailable state and requires manual modification of the Agent name and variable name before it can be used
> - **Regenerate and import**: The system will call the LLM to rename the Agent, which will consume a certain amount of model tokens and may take longer
+> 📌 **Important:** For agents created via import, if their tools include `knowledge_base_search` or other knowledge base search tools, these tools will only search **knowledge bases that the currently logged-in user is allowed to access in this environment**. The original knowledge base configuration in the exported agent will *not* be automatically inherited, so actual search results and answer quality may differ from what the original author observed.
+
@@ -40,7 +42,7 @@ You can configure other collaborative agents for your created agent, as well as
### 🛠️ Select Agent Tools
-Agents can use various tools to complete tasks, such as knowledge base search, email sending, file management, and other local tools. They can also integrate third-party MCP tools or custom tools.
+Agents can use various tools to complete tasks, such as knowledge base search, file parsing, image parsing, email sending/receiving, file management, and other local tools. They can also integrate third-party MCP tools or custom tools.
1. On the "Select Tools" tab, click "Refresh Tools" to update the available tool list
2. Select the group containing the tool you want to add
@@ -65,7 +67,7 @@ Agents can use various tools to complete tasks, such as knowledge base search, e
Nexent allows you to quickly and easily use third-party MCP tools to enrich agent capabilities.
1. On the "Select Agent Tools" tab, click "MCP Config" to configure MCP servers in the popup and view configured servers
-2. Enter the server name and URL (currently only SSE protocol is supported)
+2. Enter the server name and server URL (supports SSE and Streamable HTTP protocols)
- ⚠️ **Note:** The server name must contain only English letters or digits; spaces, underscores, and other characters are not allowed.
3. Click "Add" to complete the addition
@@ -111,7 +113,7 @@ Nexent provides a "Tool Testing" capability for all types of tools—whether the
Based on the selected collaborative agents and tools, you can now describe in simple language how you expect this agent to work. Nexent will automatically generate the agent name, description, and prompts based on your configuration and description.
1. In the editor under "Describe how should this agent work", enter a brief description, such as "You are a professional knowledge Q&A assistant with local knowledge search and online search capabilities, synthesizing information to answer user questions"
-2. Click the "Generate" button, and Nexent will generate detailed agent content for you, including basic information and prompts (role, usage requirements, few shots)
+2. Select a model (choose a smarter model when generating prompts to optimize response logic), click the "Generate Agent" button, and Nexent will generate detailed agent content for you, including basic information and prompts (role, usage requirements, examples)
3. You can edit and fine-tune the auto-generated content (especially the prompts) in the Agent Detail Content below
@@ -144,6 +146,10 @@ View the collaborative agents/tools used by the agent, displayed in a tree diagr
Export successfully debugged agents as JSON configuration files. You can use this JSON file to create a copy by importing it when creating an agent.
+### 📋 Copy
+
+Copy an agent to facilitate experimentation, multi-version debugging, and parallel development.
+
### 🗑️ Delete
Delete an agent (this cannot be undone, please proceed with caution).
diff --git a/doc/docs/en/user-guide/agent-market.md b/doc/docs/en/user-guide/agent-market.md
index 231aa09aa..6fdd8cf84 100644
--- a/doc/docs/en/user-guide/agent-market.md
+++ b/doc/docs/en/user-guide/agent-market.md
@@ -1,37 +1,63 @@
# Agent Market
-Agent Market is an upcoming Nexent module that will provide a curated catalog of ready-to-use agents.
+🎁 Here you'll find high-quality agents created by **Nexent Official** and **community creators**
-## 🎯 Coming Features
+You can use them directly to complete specific tasks, or incorporate them as sub-agents into your own agents
-Agent Market will let you:
+
-- **Browse agents** – Explore featured and community-built agents.
-- **Install instantly** – Add high-quality agents to your workspace with one click.
-- **Share your work** – Publish the agents you build.
-- **Rate & give feedback** – Help the community discover great agents.
+## 🔍 Explore and Discover
-## ⏳ Stay Tuned
+You can quickly find the best agents through the following methods:
-Agent Market is currently under development. We’re building an ecosystem where you can:
+1. Browse or search by use case category
+2. View agent feature descriptions to confirm if they meet your needs 🆗
+3. Check built-in tools to confirm if they are ready or available ✅
-- Quickly access verified agents.
-- Share your creations with the community.
-- Discover new use cases and inspiration.
+
-## 📢 Follow Updates
-Want to know when Agent Market launches?
+
-- Join the [Discord community](https://discord.gg/tb5H3S3wyv) for announcements.
-- Track project updates in the repository.
+
+
+
+## 🔧 Install Agents
+
+Select your preferred agent, download with one click, and add it to your agent space immediately
+
+### 1️⃣ Select Models
+
+🌟 Confirm model availability
+
+✍️ Configure the same model for all agents uniformly, or select appropriate models for the main agent and sub-agents separately
+
+
+
+### 2️⃣ Configure Fields
+
+🔑 Fill in tool permissions as prompted
+
+
+
+After installation, your agent will be ready in **[Agent Space](./agent-space)**
+
+## 📢 Share Your Creations
+
+Created an excellent agent? 👍
+
+Welcome to share your work in [GitHub Discussions](https://github.com/ModelEngine-Group/nexent/discussions), and we'll contact you as soon as possible to let more people see and use it!
## 🚀 Related Features
-While Agent Market is being built, you can:
+Beyond the Agent Market, you can:
-1. Manage your agents in **[Agent Space](./agent-space)**.
-2. Create new agents in **[Agent Development](./agent-development)**.
-3. Test agents in **[Start Chat](./start-chat)**.
+1. Manage your own agents in **[Agent Space](./agent-space)**
+2. Create custom agents through **[Agent Development](./agent-development)**
+3. Experience the powerful features of agents in **[Start Chat](./start-chat)**
-Need help? Check the **[FAQ](../quick-start/faq)** or open a thread in [GitHub Discussions](https://github.com/ModelEngine-Group/nexent/discussions).
+If you encounter any issues during use, please refer to our **[FAQ](../quick-start/faq)** or ask for support in [GitHub Discussions](https://github.com/ModelEngine-Group/nexent/discussions).
diff --git a/doc/docs/en/user-guide/agent-space.md b/doc/docs/en/user-guide/agent-space.md
index cd3f66fce..56f77a3de 100644
--- a/doc/docs/en/user-guide/agent-space.md
+++ b/doc/docs/en/user-guide/agent-space.md
@@ -46,6 +46,11 @@ Click a card to open its details:
1. Click **Export** on the card.
2. Nexent downloads a JSON configuration file you can import later.
+### Copy an Agent
+
+1. Click **Copy** on the card to duplicate the agent.
+2. This facilitates experimentation, multi-version debugging, and parallel development.
+
### View Relationships
1. Click **View Relationships** to see how the agent interacts with tools and other agents.
diff --git a/doc/docs/en/user-guide/assets/agent-development/generate-agent.png b/doc/docs/en/user-guide/assets/agent-development/generate-agent.png
index e1f93eeb6..876c42e18 100644
Binary files a/doc/docs/en/user-guide/assets/agent-development/generate-agent.png and b/doc/docs/en/user-guide/assets/agent-development/generate-agent.png differ
diff --git a/doc/docs/en/user-guide/assets/agent-market/agent-market-detail.png b/doc/docs/en/user-guide/assets/agent-market/agent-market-detail.png
new file mode 100644
index 000000000..b7127a049
Binary files /dev/null and b/doc/docs/en/user-guide/assets/agent-market/agent-market-detail.png differ
diff --git a/doc/docs/en/user-guide/assets/agent-market/agent-market-detail2.png b/doc/docs/en/user-guide/assets/agent-market/agent-market-detail2.png
new file mode 100644
index 000000000..3d7baf654
Binary files /dev/null and b/doc/docs/en/user-guide/assets/agent-market/agent-market-detail2.png differ
diff --git a/doc/docs/en/user-guide/assets/agent-market/agent-market-download.png b/doc/docs/en/user-guide/assets/agent-market/agent-market-download.png
new file mode 100644
index 000000000..2f258676d
Binary files /dev/null and b/doc/docs/en/user-guide/assets/agent-market/agent-market-download.png differ
diff --git a/doc/docs/en/user-guide/assets/agent-market/agent-market-download2.png b/doc/docs/en/user-guide/assets/agent-market/agent-market-download2.png
new file mode 100644
index 000000000..4bf6d9491
Binary files /dev/null and b/doc/docs/en/user-guide/assets/agent-market/agent-market-download2.png differ
diff --git a/doc/docs/en/user-guide/assets/agent-market/agent-market.png b/doc/docs/en/user-guide/assets/agent-market/agent-market.png
new file mode 100644
index 000000000..d8e71e014
Binary files /dev/null and b/doc/docs/en/user-guide/assets/agent-market/agent-market.png differ
diff --git a/doc/docs/en/user-guide/assets/agent-space/agent-space.png b/doc/docs/en/user-guide/assets/agent-space/agent-space.png
index 952596236..b43f00d21 100644
Binary files a/doc/docs/en/user-guide/assets/agent-space/agent-space.png and b/doc/docs/en/user-guide/assets/agent-space/agent-space.png differ
diff --git a/doc/docs/en/user-guide/assets/home-page/homepage.png b/doc/docs/en/user-guide/assets/home-page/homepage.png
index c6bc61a82..cb00c9561 100644
Binary files a/doc/docs/en/user-guide/assets/home-page/homepage.png and b/doc/docs/en/user-guide/assets/home-page/homepage.png differ
diff --git a/doc/docs/en/user-guide/assets/knowledge-base/tip.png b/doc/docs/en/user-guide/assets/knowledge-base/tip.png
new file mode 100644
index 000000000..6e23f6102
Binary files /dev/null and b/doc/docs/en/user-guide/assets/knowledge-base/tip.png differ
diff --git a/doc/docs/en/user-guide/assets/model-management/vector-model.png b/doc/docs/en/user-guide/assets/model-management/vector-model.png
new file mode 100644
index 000000000..c41097279
Binary files /dev/null and b/doc/docs/en/user-guide/assets/model-management/vector-model.png differ
diff --git a/doc/docs/en/user-guide/home-page.md b/doc/docs/en/user-guide/home-page.md
index d4fdde989..61d457b18 100644
--- a/doc/docs/en/user-guide/home-page.md
+++ b/doc/docs/en/user-guide/home-page.md
@@ -26,12 +26,14 @@ The left sidebar exposes every major module:
- **Start Chat** – Open the chat interface.
- **Quick Setup** – Complete the recommended setup flow (Models → Knowledge Base → Agent).
- **Agent Space** – Manage all existing agents.
-- **Agent Market** – Discover and install published agents (coming soon).
+- **Agent Market** – Discover and install published agents.
- **Agent Development** – Create and configure agents.
-- **Knowledge Base** – Upload and curate documents.
-- **Model Management** – Configure app info and connect models.
-- **Memory Management** – Enable and tune the multi-layer memory system.
-- **User Management** – Manage platform users (coming soon).
+- **Knowledge Base** – Upload documents and materials to help agents understand your exclusive knowledge.
+- **MCP Tools** – Connect servers, sync tools, and view status at a glance (coming soon).
+- **Monitoring & Operations** – Monitor agent runtime status in real time (coming soon).
+- **Model Management** – Manage app information and model configuration, connect the AI capabilities you need.
+- **Memory Management** – Control agents' long-term memory for more efficient conversations.
+- **User Management** – Provide unified user, role, and permission control for teams (coming soon).
Use the language switcher in the top-right corner to toggle between Simplified Chinese and English. The lower-left corner shows the running Nexent version to simplify troubleshooting when asking for help.
diff --git a/doc/docs/en/user-guide/knowledge-base.md b/doc/docs/en/user-guide/knowledge-base.md
index fdce554ac..5885f2b03 100644
--- a/doc/docs/en/user-guide/knowledge-base.md
+++ b/doc/docs/en/user-guide/knowledge-base.md
@@ -19,6 +19,10 @@ Create and manage knowledge bases, upload documents, and generate summaries. Kno

+💡 Hover over the status to understand the progress and error reasons
+
+
+
### Supported File Formats
Nexent supports multiple file formats, including:
diff --git a/doc/docs/en/user-guide/mcp-tools.md b/doc/docs/en/user-guide/mcp-tools.md
new file mode 100644
index 000000000..b55859cbe
--- /dev/null
+++ b/doc/docs/en/user-guide/mcp-tools.md
@@ -0,0 +1,28 @@
+# MCP Tools
+
+The upcoming MCP Tools management module will let you centrally manage MCP servers and tools on a single page, easily completing connection configuration, tool synchronization, and health status monitoring.
+
+## 🎯 Feature Preview
+
+1. Register and manage multiple MCP servers
+2. Quickly sync, view, and organize MCP tool lists
+3. Monitor MCP connection status and usage in real time
+
+## ⏳ Stay Tuned
+
+The MCP Tools management feature is under development. We are committed to building an efficient and intuitive management platform that enables you to:
+
+1. Centrally manage all MCP servers
+2. Conveniently sync and organize tools
+3. Monitor server connections and tool runtime status in real time
+
+## 🚀 Related Features
+
+While waiting for **MCP Tools** to launch, you can:
+
+1. Manage your MCP tools in **[Agent Development](./agent-development)**
+2. View agent and MCP collaboration relationships through **[Agent Space](./agent-space)**
+3. Experience platform features in **[Start Chat](./start-chat)**
+
+If you encounter any issues during use, please refer to our **[FAQ](../quick-start/faq)** or ask for support in [GitHub Discussions](https://github.com/ModelEngine-Group/nexent/discussions).
+
diff --git a/doc/docs/en/user-guide/memory-management.md b/doc/docs/en/user-guide/memory-management.md
index 6e1330b78..0caffb7e1 100644
--- a/doc/docs/en/user-guide/memory-management.md
+++ b/doc/docs/en/user-guide/memory-management.md
@@ -84,9 +84,9 @@ When an agent retrieves memory it follows this order (high ➝ low):
The system takes care of most work for you:
-- **Smart extraction:** Detects important facts in conversations and stores them automatically.
-- **Context injection:** Retrieves the most relevant memories and adds them to prompts silently.
-- **Incremental updates:** Refreshes or removes outdated memories so the store stays clean.
+- **Smart extraction:** Detects key facts in conversations, creates memory entries automatically, and stores them at the right level—no manual input needed.
+- **Automatic context embedding:** Retrieves the most relevant memories and implicitly injects them into the conversation context so agents respond with better accuracy.
+- **Incremental updates:** Gradually refreshes or removes outdated memories to keep the store clean, timely, and reliable.
## ✋ Manual Memory Operations
diff --git a/doc/docs/en/user-guide/model-management.md b/doc/docs/en/user-guide/model-management.md
index db38ea46d..b6490c824 100644
--- a/doc/docs/en/user-guide/model-management.md
+++ b/doc/docs/en/user-guide/model-management.md
@@ -123,8 +123,16 @@ After adding models, assign the platform-level defaults. These models handle sys
#### Embedding Model
-- Powers semantic search for text, images, and other knowledge-base content.
-- Select one of the added embedding models.
+Embedding models are primarily used for vectorization processing of text, images, and other data in knowledge bases, forming the foundation for efficient retrieval and semantic understanding. Configuring an appropriate embedding model can significantly improve knowledge base search accuracy and multimodal data processing capabilities.
+
+- Click the embedding model dropdown to select one from the added embedding models.
+- Embedding model configuration affects the stable operation of knowledge bases.
+
+Choose appropriate document chunk size and chunks per request based on model capabilities. Smaller chunks provide more stability, but may affect file parsing quality.
+
+
+
+
#### Vision-Language Model
diff --git a/doc/docs/en/user-guide/monitor.md b/doc/docs/en/user-guide/monitor.md
new file mode 100644
index 000000000..26752c591
--- /dev/null
+++ b/doc/docs/en/user-guide/monitor.md
@@ -0,0 +1,20 @@
+# Monitoring & Operations
+
+The upcoming Monitoring & Operations Center will provide a unified management platform for agents, allowing you to track health status, performance metrics, and exception events in real time.
+
+## 🎯 Feature Preview
+
+1. Monitor agent health status, latency, and error rates in real time
+2. View and filter runtime logs and historical tasks
+3. Configure alert policies and operational actions for key events
+
+## ⏳ Stay Tuned
+
+The Monitoring & Operations Center is under development. We are committed to building an intuitive and efficient management platform that helps you:
+
+1. Fully understand agent runtime status
+2. Quickly discover and handle exceptions
+3. Flexibly configure alerts and operational actions
+
+If you encounter any issues during use, please refer to our **[FAQ](../quick-start/faq)** or ask for support in [GitHub Discussions](https://github.com/ModelEngine-Group/nexent/discussions).
+
diff --git a/doc/docs/zh/opensource-memorial-wall.md b/doc/docs/zh/opensource-memorial-wall.md
index 7c4a7cc91..9c53ae6ce 100644
--- a/doc/docs/zh/opensource-memorial-wall.md
+++ b/doc/docs/zh/opensource-memorial-wall.md
@@ -623,3 +623,11 @@ Nexent开发者加油
::: tip 开源新手 - 2025-12-05
感谢 Nexent 让我踏上了开源之旅!
:::
+
+::: tip user - 2025-12-10
+很开心能接触到这个平台,让我有机会踏上开源之旅
+:::
+
+::: tip 开源新手 - 2025-12-14
+开放原子大赛接触到了Nexent平台,祝越来越好!
+:::
diff --git a/doc/docs/zh/user-guide/agent-development.md b/doc/docs/zh/user-guide/agent-development.md
index 7d4a28581..ebbcccbc9 100644
--- a/doc/docs/zh/user-guide/agent-development.md
+++ b/doc/docs/zh/user-guide/agent-development.md
@@ -19,6 +19,8 @@
> - **直接导入**:保留重复名称,导入后的智能体会处于不可用状态,需手动修改 Agent 名称和变量名后才能使用
> - **重新生成并导入**:系统将调用 LLM 对 Agent 进行重命名,会消耗一定的模型 token 数,可能耗时较长
+> 📌 **重要说明**:通过导入创建的智能体,如果其工具中包含 `knowledge_base_search` 等知识库检索工具,这些工具只会检索**当前登录用户在本环境中有权限访问的知识库**。导入文件中原有的知识库配置不会自动继承,因此实际检索结果和回答效果,可能与智能体原作者环境下的表现存在差异。
+