# Replay the conversation stored in session state: each recorded message is
# rendered in a chat bubble matching its role ("user" / "assistant").
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

# Chat input box at the bottom of the page; yields None until the user submits.
prompt = st.chat_input("随时@你想要的[海哥Python]+ 使用闽南菜助手!")
# Handle a freshly submitted user message (prompt is None until the user sends one).
if prompt:
    # Record the user's message in the conversation history, then display it.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Only call the backend for models this app knows how to serve.
    if st.session_state["openai_model"] in ["gpt-3.5-turbo", "gpt-4", "qwen-turbo", "qwen-max"]:
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                # Payload forwarded to the model helper.
                request_inputs = {
                    "messages": st.session_state.messages,
                    "model_name": st.session_state["openai_model"],
                }
                print(request_inputs)  # NOTE(review): debug print — consider logging instead
                # Qwen models go through the Tongyi helper; otherwise the
                # module-level GPT streaming helper (stream_invoke) is used as-is.
                if request_inputs["model_name"] in ["qwen-turbo", "qwen-max"]:
                    stream_invoke = xiao_hei_zi
                # st.write_stream both renders the streamed chunks AND returns the
                # concatenated reply, so an extra st.markdown(response) here would
                # render the assistant's answer twice — removed.
                response = st.write_stream(stream_invoke(**request_inputs))
        # Persist the assistant's reply so it is replayed on the next rerun.
        st.session_state.messages.append({"role": "assistant", "content": response})
导入的 GPT 大模型请求(gpt_bot_stream.py):
# -*- coding: utf-8 -*-
from langchain_openai import ChatOpenAI from langchain_core.prompts import ChatPromptTemplate from langchain_core.output_parsers import StrOutputParser
from dotenv import load_dotenv, find_dotenv # 导入dotenv库,用于加载环境变量
# Manual smoke test: stream a sample answer to stdout when run as a script.
if __name__ == '__main__':
    question = "介绍一下泉州海蛎煎?详细些!"
    for chunk in stream_invoke(messages=question):
        print(chunk)
导入的千问大模型请求(qw_bot.py):
# -*- coding: utf-8 -*-
from dotenv import find_dotenv, load_dotenv from langchain_community.llms import Tongyi from langchain_core.runnables import RunnableSequence from langchain.prompts import PromptTemplate
# Load environment variables (presumably the DashScope/Tongyi API key — confirm
# against .env) from the nearest .env file found upward from the working directory.
load_dotenv(find_dotenv())
defxiao_hei_zi(messages: str = '介绍一下泉州海蛎煎?详细些!', model_name: str = "qwen-turbo"): llm = "" match model_name: case "qwen-turbo": llm = Tongyi(temperature=1, model_name=model_name) case "qwen-max": llm = Tongyi(temperature=1, model_name=model_name)