diff --git a/beisi_rag/backend/__pycache__/backend.cpython-311.pyc b/beisi_rag/backend/__pycache__/backend.cpython-311.pyc
new file mode 100644
index 00000000..2340c624
Binary files /dev/null and b/beisi_rag/backend/__pycache__/backend.cpython-311.pyc differ
diff --git a/beisi_rag/backend/backend.py b/beisi_rag/backend/backend.py
new file mode 100644
index 00000000..d9a1c7b4
--- /dev/null
+++ b/beisi_rag/backend/backend.py
@@ -0,0 +1,170 @@
+from __future__ import annotations
+import json
+import os
+from pathlib import Path
+from typing import List, AsyncGenerator
+from fastapi import FastAPI, HTTPException
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel, Field
+from dotenv import load_dotenv
+import dashscope
+from dashscope.aigc.generation import AioGeneration
+from langchain_community.vectorstores import FAISS
+from langchain_community.embeddings import DashScopeEmbeddings
+from langchain_core.documents import Document
+
+# Load environment variables
+load_dotenv(dotenv_path=Path(__file__).resolve().parents[1] / "config" / ".env")
+
+# DashScope configuration
+DASHSCOPE_API_KEY = os.getenv("DASHSCOPE_API_KEY")
+MODEL_NAME = os.getenv("MODEL_NAME", "qwen-plus")
+TOP_K_DEFAULT = int(os.getenv("TOP_K", "4"))
+
+# Load the FAISS vector store (must use the same embedding model as the ingest step)
+INDEX_DIR = Path(__file__).parent.parent / "vectordb"
+emb = DashScopeEmbeddings(dashscope_api_key=DASHSCOPE_API_KEY, model="text-embedding-v3")
+vectordb = FAISS.load_local(str(INDEX_DIR), emb, allow_dangerous_deserialization=True)
+
+
+def format_context(docs: List[Document]) -> str:
+    """Format retrieved documents into a numbered context block."""
+    parts = []
+    for i, d in enumerate(docs, 1):
+        src = (d.metadata or {}).get("source", "unknown")
+        txt = (d.page_content or "").replace("\n", " ")
+        if len(txt) > 500:
+            txt = txt[:500] + "…"
+        parts.append(f"[{i}] ({src}) {txt}")
+    return "\n".join(parts)
+
+
+def retrieve(question: str, k: int) -> List[Document]:
+    """Synchronous similarity search over the vector store."""
+    retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": k})
+    return retriever.invoke(question)
+
+
+SYSTEM_PROMPT = (
+    "你是严谨的中文检索增强助手。严格依据给定上下文回答;"
+    "若上下文没有答案,请明确说“不确定”,并给出你能确认的线索。"
+)
+
+
+def build_user_prompt(question: str, context: str) -> str:
+    return (
+        "结合<已检索上下文>作答:\n"
+        f"<已检索上下文>\n{context}\n</已检索上下文>\n\n"
+        f"用户问题:{question}\n"
+        "要求:若答案不在上下文里,明确说明不确定;用中文、分点作答,必要时给出引用的原句摘要。"
+    )
+
+
+# FastAPI application
+app = FastAPI(title="Qwen RAG QA Backend", version="1.0.0")
+app.add_middleware(
+    CORSMiddleware,
+    # Note: browsers refuse wildcard origins combined with credentials;
+    # restrict allow_origins before shipping this beyond a demo.
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+
+# Request/response models
+class ChatReq(BaseModel):
+    question: str
+    top_k: int = Field(default=TOP_K_DEFAULT, ge=1, le=12)
+    model: str = Field(default=MODEL_NAME)
+    temperature: float = Field(default=0.3, ge=0, le=1)
+
+
+class ChatResp(BaseModel):
+    answer: str
+
+
+def _sse_event(data: str) -> bytes:
+    """Frame one Server-Sent Events message."""
+    return f"data: {data}\n\n".encode("utf-8")
+
+
+# Streaming endpoint
+@app.post("/chat/stream")
+async def chat_stream(req: ChatReq):
+    try:
+        docs = retrieve(req.question, req.top_k)
+        context = format_context(docs)
+        prompt = build_user_prompt(req.question, context)
+
+        async def event_gen() -> AsyncGenerator[bytes, None]:
+            yield _sse_event(json.dumps({"type": "meta", "message": "stream-start"}))
+
+            try:
+                # Stream the answer from DashScope. This call is synchronous, so it
+                # blocks the event loop while iterating; acceptable for a demo, but
+                # AioGeneration.call(..., stream=True) would be the async route.
+                stream = dashscope.Generation.call(
+                    api_key=DASHSCOPE_API_KEY,
+                    model=req.model,
+                    messages=[{"role": "system", "content": SYSTEM_PROMPT},
+                              {"role": "user", "content": prompt}],
+                    result_format="message",
+                    temperature=req.temperature,
+                    enable_thinking=True,
+                    stream=True,
+                    incremental_output=True,
+                )
+
+                is_answering = False
+                for chunk in stream:
+                    msg = chunk.output.choices[0].message
+                    reasoning = getattr(msg, "reasoning_content", "") or ""
+                    content = msg.content or ""
+                    if not reasoning and not content:
+                        continue
+                    if reasoning and not content:
+                        yield _sse_event(reasoning)
+                    else:
+                        if not is_answering:
+                            # Separator between the thinking trace and the answer
+                            yield _sse_event("\n" + "=" * 20 + "完整回复" + "=" * 20)
+                            is_answering = True
+                        yield _sse_event(content)
+
+                yield _sse_event("[DONE]")
+            except Exception as ie:
+                # Emit a structured error event instead of hand-built JSON
+                yield _sse_event(json.dumps({"type": "error", "message": str(ie)}, ensure_ascii=False))
+
+        return StreamingResponse(event_gen(), media_type="text/event-stream")
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+# Non-streaming endpoint
+@app.post("/chat", response_model=ChatResp)
+async def chat(req: ChatReq):
+    try:
+        docs = retrieve(req.question, req.top_k)
+        context = format_context(docs)
+        prompt = build_user_prompt(req.question, context)
+
+        response = await AioGeneration.call(
+            api_key=DASHSCOPE_API_KEY,
+            model=req.model,
+            messages=[{"role": "system", "content": SYSTEM_PROMPT},
+                      {"role": "user", "content": prompt}],
+            result_format="message",
+            temperature=req.temperature,
+        )
+        answer = response.output.choices[0].message.content.strip()
+        return ChatResp(answer=answer)
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+if __name__ == "__main__":
+    import uvicorn
+    HOST = os.getenv("HOST", "0.0.0.0")
+    PORT = int(os.getenv("PORT", "8000"))
+    uvicorn.run(app, host=HOST, port=PORT, reload=False)
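For reference, a minimal consumer of the `/chat/stream` endpoint might look like the sketch below. It assumes the backend above is running locally on port 8000 (the `HOST`/`PORT` defaults) and that `requests` is installed; neither assumption comes from this patch.

```python
# Minimal SSE consumer for /chat/stream — a sketch, not part of the patch.
import requests

resp = requests.post(
    "http://localhost:8000/chat/stream",
    json={"question": "什么是RAG?", "top_k": 4, "model": "qwen-plus", "temperature": 0.3},
    stream=True,
)
for line in resp.iter_lines(decode_unicode=True):
    # SSE frames look like "data: <payload>"; blank lines separate events.
    if not line or not line.startswith("data:"):
        continue
    payload = line[len("data:"):].strip()
    if payload == "[DONE]":
        break
    print(payload, end="", flush=True)
```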
diff --git a/beisi_rag/config/.env b/beisi_rag/config/.env
new file mode 100644
index 00000000..1e44a6dc
--- /dev/null
+++ b/beisi_rag/config/.env
@@ -0,0 +1,7 @@
+export DASHSCOPE_API_KEY="***************************************************"
+export OPENAI_API_KEY="$DASHSCOPE_API_KEY"
+export OPENAI_BASE_URL="https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
+export MODEL_NAME="qwen-plus"
+export TOP_K="4"
+
+
diff --git a/beisi_rag/requirements.txt b/beisi_rag/requirements.txt
new file mode 100644
index 00000000..c6dfda1b
--- /dev/null
+++ b/beisi_rag/requirements.txt
@@ -0,0 +1,13 @@
+faiss-cpu==1.12.0
+importlib-metadata==8.0.0
+jaraco.collections==5.1.0
+langchain==1.0.3
+langchain-community==0.4.1
+langchain-openai==1.0.1
+pip-chill==1.0.3
+platformdirs==4.2.2
+tomli==2.0.1
+uvicorn==0.38.0
+fastapi
+python-dotenv
+dashscope
\ No newline at end of file
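Note on the pins: `unicorn==2.1.4` was dropped (the PyPI `unicorn` package is a CPU-emulator binding, almost certainly a typo for the already-pinned `uvicorn`), and `dotenv` became `python-dotenv`, which is the distribution that actually provides `from dotenv import load_dotenv`. A quick, offline way to sanity-check the `.env` file is the sketch below; `python-dotenv` tolerates the `export ` prefixes, so the same file also works with `source`. The path assumes the repo root as the working directory.

```python
# Sanity-check config/.env — a sketch, assuming CWD is the repo root.
from pathlib import Path
from dotenv import dotenv_values

cfg = dotenv_values(Path("beisi_rag/config/.env"))
assert cfg.get("MODEL_NAME") == "qwen-plus"
# Print the config with key material masked.
print({k: ("***" if "KEY" in k else v) for k, v in cfg.items()})
```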
diff --git a/beisi_rag/src/chat_qwen_rag.py b/beisi_rag/src/chat_qwen_rag.py
new file mode 100644
index 00000000..d86c5a09
--- /dev/null
+++ b/beisi_rag/src/chat_qwen_rag.py
@@ -0,0 +1,161 @@
+# /Users/profighted/beisi-tech/docs/RAG-Anything/beisi_rag/chat_qwen_rag.py
+import os
+from pathlib import Path
+from dotenv import load_dotenv
+
+from langchain_community.vectorstores import FAISS
+from langchain_community.embeddings import DashScopeEmbeddings
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.runnables import RunnableParallel, RunnablePassthrough, RunnableLambda
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, BaseMessage
+
+from openai import OpenAI
+
+load_dotenv(dotenv_path=Path(__file__).resolve().parents[1] / "config" / ".env")
+
+# Local vector store directory (must match the ingest step)
+INDEX_DIR = Path(__file__).parent.parent / "vectordb"
+
+# ====== Qwen OpenAI-compatible endpoint configuration ======
+DASHSCOPE_API_KEY = os.environ.get("DASHSCOPE_API_KEY")
+assert DASHSCOPE_API_KEY, "Please export DASHSCOPE_API_KEY=<your DashScope key> first"
+
+# For the Singapore region, switch to https://dashscope-intl.aliyuncs.com/compatible-mode/v1
+DASHSCOPE_BASE_URL = os.environ.get(
+    "DASHSCOPE_BASE_URL",
+    "https://dashscope.aliyuncs.com/compatible-mode/v1",
+)
+
+# Chat model & embedding model (adjust to what your account can access)
+CHAT_MODEL = os.environ.get("QWEN_CHAT_MODEL", "qwen-plus")  # qwen2.5-7b-instruct / qwen-turbo also work
+EMBEDDING_MODEL = os.environ.get("QWEN_EMBED_MODEL", "text-embedding-v3")
+
+
+def load_retriever():
+    """Load the FAISS retriever (same embedding model as ingest)."""
+    embeddings = DashScopeEmbeddings(
+        dashscope_api_key=DASHSCOPE_API_KEY,
+        model=EMBEDDING_MODEL,
+    )
+    vectordb = FAISS.load_local(str(INDEX_DIR), embeddings, allow_dangerous_deserialization=True)
+    return vectordb.as_retriever(search_type="similarity", search_kwargs={"k": 4})
+
+
+def build_llm_runnable():
+    """
+    Build a LangChain Runnable on top of the OpenAI-compatible Qwen endpoint.
+    Avoids langchain_openai to reduce the risk of version conflicts.
+    """
+    client = OpenAI(api_key=DASHSCOPE_API_KEY, base_url=DASHSCOPE_BASE_URL)
+
+    def _lc_to_openai_messages(prompt_value) -> list[dict]:
+        """Convert a LangChain PromptValue / BaseMessage list into OpenAI-compatible messages."""
+        if hasattr(prompt_value, "to_messages"):
+            msgs = prompt_value.to_messages()  # List[BaseMessage]
+        elif isinstance(prompt_value, list) and all(isinstance(m, BaseMessage) for m in prompt_value):
+            msgs = prompt_value
+        else:
+            # Fallback: treat it as a single-turn user input
+            msgs = [HumanMessage(content=str(prompt_value))]
+
+        out = []
+        for m in msgs:
+            if isinstance(m, SystemMessage):
+                role = "system"
+            elif isinstance(m, HumanMessage):
+                role = "user"
+            elif isinstance(m, AIMessage):
+                role = "assistant"
+            else:
+                role = "user"
+            out.append({"role": role, "content": m.content})
+        return out
+
+    def _invoke(prompt_value: BaseMessage | list[BaseMessage] | str) -> str:
+        messages = _lc_to_openai_messages(prompt_value)
+        resp = client.chat.completions.create(
+            model=CHAT_MODEL,
+            messages=messages,
+            temperature=0.3,
+        )
+        return resp.choices[0].message.content
+
+    return RunnableLambda(_invoke)
+
+
+RAG_PROMPT = ChatPromptTemplate.from_template(
+    """你是严谨的检索增强助手。请结合<已检索上下文>,**用你自己的话**综合回答用户问题,禁止大段原文粘贴。
+
+规则:
+1) 先总结,再给出处;答案主体必须是**你自己的表述**。
+2) 如需引用原句,每处引用≤50字,并用引号与编号标注,如 “……”[1]。
+3) 如果上下文没有明确答案,请说“不确定”,并给出可验证的线索。
+4) 输出中文、结构化要点,并在末尾列出参考编号(如 [1][3])。
+
+<已检索上下文>
+{context}
+</已检索上下文>
+
+用户问题:{question}
+"""
+)
+
+
+def format_docs(docs):
+    out = []
+    for i, d in enumerate(docs, 1):
+        meta = d.metadata or {}
+        src = meta.get("source", "unknown")
+        # Cap each snippet at 500 characters to keep the prompt short
+        out.append(f"[{i}] ({src}) {d.page_content[:500]}")
+    return "\n\n".join(out)
+
+
+def main():
+    retriever = load_retriever()
+    llm_runnable = build_llm_runnable()
+
+    # RAG chain: retrieve → format context → prompt → Qwen (compatible endpoint) → parse text
+    chain = (
+        RunnableParallel(context=retriever | format_docs, question=RunnablePassthrough())
+        | RAG_PROMPT
+        | llm_runnable
+        | StrOutputParser()
+    )
+
+    print("💬 输入你的问题(Ctrl+C 退出)")
+    while True:
+        try:
+            q = input("> ").strip()
+            if not q:
+                continue
+            ans = chain.invoke(q)
+            print("\n" + ans + "\n")
+        except (EOFError, KeyboardInterrupt):
+            print("\n再见~")
+            break
+        except Exception as e:
+            print("❌ 出错:", e)
+
+
+if __name__ == "__main__":
+    main()
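The `RunnableParallel` at the head of the chain fans the question out to two branches and yields the dict that `RAG_PROMPT` expects. The sketch below makes that data flow visible with in-memory fakes, so it runs without an API key or index; `fake_retriever` and `demo.md` are illustrative stand-ins, not part of the patch.

```python
# Data-flow sketch for the RAG chain head — fakes only, no network calls.
from langchain_core.documents import Document
from langchain_core.runnables import RunnableLambda, RunnableParallel, RunnablePassthrough

fake_retriever = RunnableLambda(
    lambda q: [Document(page_content=f"关于“{q}”的片段", metadata={"source": "demo.md"})]
)

def format_docs(docs):
    return "\n\n".join(f"[{i}] ({d.metadata['source']}) {d.page_content}" for i, d in enumerate(docs, 1))

step = RunnableParallel(context=fake_retriever | RunnableLambda(format_docs), question=RunnablePassthrough())
print(step.invoke("什么是RAG?"))
# -> {'context': '[1] (demo.md) 关于“什么是RAG?”的片段', 'question': '什么是RAG?'}
```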
diff --git a/beisi_rag/src/ingest_qwen_faiss.py b/beisi_rag/src/ingest_qwen_faiss.py
new file mode 100644
index 00000000..f7919b16
--- /dev/null
+++ b/beisi_rag/src/ingest_qwen_faiss.py
@@ -0,0 +1,75 @@
+# /Users/profighted/beisi-tech/docs/beisi_rag/ingest_qwen_faiss.py
+import os
+from pathlib import Path
+from dotenv import load_dotenv
+
+from langchain_community.document_loaders import TextLoader, DirectoryLoader
+from langchain_text_splitters import RecursiveCharacterTextSplitter
+from langchain_community.vectorstores import FAISS
+from langchain_community.embeddings import DashScopeEmbeddings
+
+# Load the same config/.env the backend uses, instead of relying on the CWD
+load_dotenv(dotenv_path=Path(__file__).resolve().parents[1] / "config" / ".env")
+
+# my-website/ sits next to beisi_rag/ at the repo root
+DATA_DIR = Path(__file__).resolve().parents[2] / "my-website" / "docs"
+# Must match the directory that backend.py and chat_qwen_rag.py load from
+INDEX_DIR = Path(__file__).resolve().parents[1] / "vectordb"
+INDEX_DIR.mkdir(exist_ok=True)
+
+MAX_BATCH = 10  # DashScope limit: at most 10 texts per embedding call
+
+def load_docs():
+    loader = DirectoryLoader(
+        str(DATA_DIR),
+        glob="**/*",
+        loader_cls=TextLoader,
+        show_progress=True
+    )
+    return loader.load()
+
+def chunked(iterable, n):
+    for i in range(0, len(iterable), n):
+        yield iterable[i:i + n]
+
+def main():
+    # 1) Load and split
+    docs = load_docs()
+    splitter = RecursiveCharacterTextSplitter(
+        chunk_size=800,
+        chunk_overlap=120,
+        separators=["\n\n", "\n", "。", "!", "?", ",", " ", ""]
+    )
+    splits = splitter.split_documents(docs)
+    texts = [d.page_content for d in splits]
+    metas = [d.metadata for d in splits]
+
+    # 2) Qwen (DashScope) embeddings
+    api_key = os.environ.get("DASHSCOPE_API_KEY")
+    if not api_key:
+        raise RuntimeError("Please export DASHSCOPE_API_KEY=<your key> first")
+
+    embeddings = DashScopeEmbeddings(
+        dashscope_api_key=api_key,
+        model="text-embedding-v3",  # adjust to whatever your account can use
+    )
+
+    # 3) Build the FAISS index, feeding at most 10 texts per batch
+    if len(texts) == 0:
+        raise RuntimeError("No usable text chunks; check that the docs directory has readable files.")
+
+    # Initialize the index with the first batch
+    first_texts = texts[:MAX_BATCH]
+    first_metas = metas[:MAX_BATCH]
+    store = FAISS.from_texts(first_texts, embeddings, metadatas=first_metas)
+
+    # Append the remaining batches
+    for batch_texts, batch_metas in zip(
+        chunked(texts[MAX_BATCH:], MAX_BATCH),
+        chunked(metas[MAX_BATCH:], MAX_BATCH),
+    ):
+        store.add_texts(batch_texts, metadatas=batch_metas)
+
+    # 4) Save the index
+    store.save_local(str(INDEX_DIR))
+    print(f"✅ Vectorized {len(texts)} chunks; index saved at: {INDEX_DIR}")
+
+if __name__ == "__main__":
+    main()
diff --git a/beisi_rag/src/test.py b/beisi_rag/src/test.py
new file mode 100644
index 00000000..7d306061
--- /dev/null
+++ b/beisi_rag/src/test.py
@@ -0,0 +1,25 @@
+import os
+from openai import OpenAI
+from dotenv import load_dotenv
+load_dotenv()
+
+try:
+    client = OpenAI(
+        # If the env var is not set, replace the next line with your Model Studio key: api_key="sk-xxx",
+        # Singapore and Beijing regions use different keys. Get one at: https://help.aliyun.com/zh/model-studio/get-api-key
+        api_key=os.getenv("DASHSCOPE_API_KEY"),
+        # Beijing-region base_url; for Singapore-region models use: https://dashscope-intl.aliyuncs.com/compatible-mode/v1
+        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+    )
+
+    completion = client.chat.completions.create(
+        model="qwen-plus",  # model list: https://help.aliyun.com/zh/model-studio/getting-started/models
+        messages=[
+            {'role': 'system', 'content': 'You are a helpful assistant.'},
+            {'role': 'user', 'content': '你是谁?'}
+        ]
+    )
+    print(completion.choices[0].message.content)
+except Exception as e:
+    print(f"Error: {e}")
+    print("See: https://help.aliyun.com/zh/model-studio/developer-reference/error-code")
\ No newline at end of file
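The ≤10-per-batch feeding in `ingest_qwen_faiss.py` can be verified offline with the sketch below. `FakeEmbeddings` is a stand-in for `DashScopeEmbeddings` (it returns random vectors, which is fine for a count check), so no tokens are spent; it is not part of the ingest script.

```python
# Offline check of the batched FAISS build — no API calls.
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import FAISS

def chunked(seq, n):
    for i in range(0, len(seq), n):
        yield seq[i:i + n]

texts = [f"chunk-{i}" for i in range(23)]
emb = FakeEmbeddings(size=8)  # random vectors; adequate for a shape/count test

store = FAISS.from_texts(texts[:10], emb)   # first batch initializes the index
for batch in chunked(texts[10:], 10):       # remaining batches are appended
    store.add_texts(batch)

assert store.index.ntotal == 23  # every chunk made it in, <= 10 per call
```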
diff --git a/beisi_rag/vectordb/index.faiss b/beisi_rag/vectordb/index.faiss
new file mode 100644
index 00000000..61e97ef1
Binary files /dev/null and b/beisi_rag/vectordb/index.faiss differ
diff --git a/beisi_rag/vectordb/index.pkl b/beisi_rag/vectordb/index.pkl
new file mode 100644
index 00000000..ae08d1a5
Binary files /dev/null and b/beisi_rag/vectordb/index.pkl differ
diff --git a/my-website/src/components/ChatWidget/chatwidget.module.css b/my-website/src/components/ChatWidget/chatwidget.module.css
new file mode 100644
index 00000000..74e328d3
--- /dev/null
+++ b/my-website/src/components/ChatWidget/chatwidget.module.css
@@ -0,0 +1,36 @@
+.fab{
+  position: fixed; right: 20px; bottom: 20px;
+  width: 56px; height: 56px; border-radius: 50%;
+  border: none; background: #3b82f6; color:#fff; font-weight:700;
+  box-shadow: 0 8px 20px rgba(0,0,0,.15); cursor: pointer; z-index: 9999;
+}
+.panel{
+  position: fixed; right: 20px; bottom: 90px;
+  width: 340px; height: 460px; background:#fff; border-radius: 12px;
+  box-shadow: 0 16px 40px rgba(0,0,0,.2); display:flex; flex-direction:column;
+  overflow:hidden; z-index: 9999;
+}
+.header{
+  padding:12px 14px; font-weight:700; background:#f8fafc;
+  border-bottom:1px solid #e5e7eb; display:flex; justify-content:space-between; align-items:center;
+}
+.switch{ font-size:12px; display:flex; gap:6px; align-items:center; }
+.messages{ flex:1; padding:12px; overflow:auto; background:#fafafa; }
+.msg{ display:flex; margin-bottom:8px; }
+.user{ justify-content:flex-end; }
+.assistant{ justify-content:flex-start; }
+.bubble{
+  max-width: 78%; padding:8px 10px; border-radius:10px; line-height:1.4;
+  background:#e5e7eb; color:#111827; white-space:pre-wrap;
+}
+.user .bubble{ background:#3b82f6; color:#fff; }
+.inputBar{
+  display:flex; gap:8px; padding:10px; border-top:1px solid #e5e7eb; background:#fff;
+}
+.inputBar input{
+  flex:1; height:36px; padding:6px 10px; border:1px solid #d1d5db; border-radius:8px; outline:none;
+}
+.inputBar button{
+  height:36px; padding:0 12px; border:none; border-radius:8px;
+  background:#111827; color:#fff; cursor:pointer;
+}
diff --git a/my-website/src/components/ChatWidget/chatwidget.tsx b/my-website/src/components/ChatWidget/chatwidget.tsx
new file mode 100644
index 00000000..de55e4a2
--- /dev/null
+++ b/my-website/src/components/ChatWidget/chatwidget.tsx
@@ -0,0 +1,168 @@
+import React, {useMemo, useRef, useState} from 'react';
+import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
+import styles from './chatwidget.module.css';
+
+type Role = 'user' | 'assistant';
+interface Msg { role: Role; text: string; }
+
+const CHAT_ID = 'default';
+
+export default function ChatWidget() {
+  const {siteConfig} = useDocusaurusContext();
+  const API_BASE =
+    (siteConfig as any)?.customFields?.CHAT_API_BASE ?? 'http://115.190.200.222:8000';
+
+  const [open, setOpen] = useState(false);
+  const [msgs, setMsgs] = useState<Msg[]>([
+    {role: 'assistant', text: '嗨,我是你的智能助手~'},
+  ]);
+  const [userInput, setUserInput] = useState('');
+  const [loading, setLoading] = useState(false);
+  const [useStream, setUseStream] = useState(false);
+  const listRef = useRef<HTMLDivElement | null>(null);
+
+  const chatUrl = useMemo(() => `${API_BASE}/chat`, [API_BASE]);
+  const streamUrl = useMemo(() => `${API_BASE}/chat/stream`, [API_BASE]);
+
+  const scrollToBottom = () => {
+    requestAnimationFrame(() => {
+      const el = listRef.current;
+      if (el) el.scrollTop = el.scrollHeight;
+    });
+  };
+
+  const send = async () => {
+    const q = userInput.trim();
+    if (!q || loading) return;
+    setMsgs((m) => [...m, {role: 'user', text: q}]);
+    setUserInput('');
+    setLoading(true);
+
+    // Accumulate streamed fragments into one assistant bubble instead of
+    // creating a new bubble per SSE chunk.
+    let started = false;
+    const pushChunk = (text: string) => {
+      setMsgs((m) => {
+        if (!started) {
+          started = true;
+          return [...m, {role: 'assistant', text}];
+        }
+        const last = m[m.length - 1];
+        return [...m.slice(0, -1), {role: 'assistant', text: last.text + text}];
+      });
+    };
+
+    try {
+      if (!useStream || !('ReadableStream' in globalThis)) {
+        // --- Non-streaming ---
+        const res = await fetch(chatUrl, {
+          method: 'POST',
+          headers: {'Content-Type': 'application/json'},
+          body: JSON.stringify({
+            question: q,
+            top_k: 4,
+            model: 'qwen-plus',
+            temperature: 0.3,
+          }),
+          // credentials: 'include',
+        });
+        if (!res.ok) throw new Error(`${res.status} ${res.statusText}`);
+        const data = await res.json();
+        const answer = data?.answer ?? data?.data?.answer ?? '';
+        setMsgs((m) => [...m, {role: 'assistant', text: String(answer)}]);
+      } else {
+        // --- Streaming ---
+        const res = await fetch(streamUrl, {
+          method: 'POST',
+          headers: {'Content-Type': 'application/json'},
+          body: JSON.stringify({
+            question: q,
+            top_k: 4,
+            model: 'qwen-plus',
+            temperature: 0.3,
+          }),
+          // credentials: 'include',
+        });
+        if (!res.ok || !res.body) throw new Error('SSE unavailable');
+
+        const reader = res.body.getReader();
+        const decoder = new TextDecoder('utf-8');
+        let buf = '';
+
+        while (true) {
+          const {done, value} = await reader.read();
+          if (done) break;
+          buf += decoder.decode(value, {stream: true});
+
+          // Split SSE frames: each frame ends with \n\n and starts with "data: "
+          const chunks = buf.split('\n\n');
+          buf = chunks.pop() ?? '';
+
+          for (const chunk of chunks) {
+            if (!chunk.startsWith('data:')) continue;
+            const payload = chunk.replace(/^data:\s*/, '').trim();
+            if (!payload) continue;
+            if (payload === '[DONE]') continue;
+
+            let obj: any = null;
+            try { obj = JSON.parse(payload); } catch { /* plain text */ }
+
+            if (obj?.type === 'meta') {
+              // stream-start event, ignore
+            } else if (obj?.type === 'error') {
+              pushChunk(`❗${obj.message || ''}`);
+            } else {
+              // The backend currently emits plain-text chunks (reasoning trace,
+              // separator line, then the answer), so this branch handles most frames.
+              pushChunk(typeof obj === 'string' ? obj : payload);
+            }
+            scrollToBottom();
+          }
+        }
+      }
+    } catch (e: any) {
+      setMsgs((m) => [...m, {role: 'assistant', text: `❗请求失败:${e?.message || e}`}]);
+    } finally {
+      setLoading(false);
+      scrollToBottom();
+    }
+  };
+
+  return (
+    <>
+      <button className={styles.fab} onClick={() => setOpen((o) => !o)}>
+        💬
+      </button>
+
+      {open && (
+        <div className={styles.panel}>
+          <div className={styles.header}>
+            <span>聊天助手</span>
+            <label className={styles.switch}>
+              <input
+                type="checkbox"
+                checked={useStream}
+                onChange={(e) => setUseStream(e.target.checked)}
+              />
+              流式
+            </label>
+          </div>
+
+          <div className={styles.messages} ref={listRef}>
+            {msgs.map((m, i) => (
+              <div key={i} className={`${styles.msg} ${m.role === 'user' ? styles.user : styles.assistant}`}>
+                <div className={styles.bubble}>{m.text}</div>
+              </div>
+            ))}
+            {loading && (
+              <div className={`${styles.msg} ${styles.assistant}`}>
+                <div className={styles.bubble}>思考中…</div>
+              </div>
+            )}
+          </div>
+
+          <div className={styles.inputBar}>
+            <input
+              value={userInput}
+              onChange={(e) => setUserInput(e.target.value)}
+              onKeyDown={(e) => (e.key === 'Enter' ? send() : null)}
+              placeholder="输入问题并回车"
+            />
+            <button onClick={send} disabled={loading}>发送</button>
+          </div>
+        </div>
+      )}
+    </>
+  );
+}
+ )} + + ); +} diff --git a/my-website/src/theme/Root.tsx b/my-website/src/theme/Root.tsx new file mode 100644 index 00000000..a74c0ed1 --- /dev/null +++ b/my-website/src/theme/Root.tsx @@ -0,0 +1,15 @@ +import React from 'react'; +import {useLocation} from '@docusaurus/router'; +import ChatWidget from '../components/ChatWidget/chatwidget'; + +export default function Root({children}) { + const location = useLocation(); + const isHome = location.pathname === '/' || location.pathname.endsWith('/index.html'); + + return ( + <> + {children} + {!isHome && } + + ); +}