import gradio as gr
from langchain_openai import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain_core.output_parsers import StrOutputParser
from langchain.prompts.chat import (
    ChatPromptTemplate,
    MessagesPlaceholder,
    HumanMessagePromptTemplate,
)
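# These imports assume the relevant packages are installed, e.g.:
#   pip install gradio langchain langchain-openai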
# Initialize LangChain chat model
llm = ChatOpenAI(
    temperature=0.7,
    model_name="llama-3.2-3B-instruct",  # Replace with your specific model or endpoint if required
    openai_api_base="https://hub.societyai.com/models/llama-3-2-3b/openai/v1",
    # NOTE: ChatOpenAI still expects an API key (the OPENAI_API_KEY environment
    # variable or openai_api_key=...); a placeholder value works for gateways
    # that do not verify it.
)
# Set up the memory for the chatbot
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
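# memory_key="chat_history" must match the MessagesPlaceholder variable_name
# below. Note: newer LangChain releases deprecate ConversationBufferMemory in
# favor of RunnableWithMessageHistory; it still works for this simple demo.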
# Create a prompt template that includes conversation history
prompt = ChatPromptTemplate.from_messages([
    MessagesPlaceholder(variable_name="chat_history"),
    HumanMessagePromptTemplate.from_template("{input}"),
])
# Create the chain using RunnableSequence
chain = prompt | llm | StrOutputParser()
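# Because StrOutputParser is the last step, invoking the chain returns a plain
# string, e.g. (hypothetically):
#   chain.invoke({"input": "Hello!", "chat_history": []})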
# Define the Gradio interface
with gr.Blocks(css="footer {visibility: hidden}") as demo:
    chatbot = gr.Chatbot()  # history is a list of (user, assistant) tuples
    msg = gr.Textbox()      # user input box
    clear = gr.Button("Clear")

    def user(user_message, history):
        """Appends the user message to the conversation history."""
        history = history or []
        history.append((user_message, None))  # assistant's reply is filled in by bot()
        return "", history

    def bot(history):
        """Processes the conversation history with LangChain."""
        user_message = history[-1][0]  # defined outside try so except can use it
        try:
            # Snapshot the history *before* recording the new user message, so
            # the prompt's {input} slot doesn't duplicate it in chat_history
            chat_history = list(memory.chat_memory.messages)

            # Generate a response using LangChain
            response = chain.invoke({"input": user_message, "chat_history": chat_history})

            # Record both sides of the turn in memory
            memory.chat_memory.add_user_message(user_message)
            memory.chat_memory.add_ai_message(response)

            # Update the last entry with the assistant's response
            history[-1] = (user_message, response)
            return history
        except Exception as e:
            # Show the error in the chat window instead of crashing the app
            history[-1] = (user_message, f"[Error]: {e}")
            return history

    def clear_history():
        """Clears the LangChain memory and resets the chat window."""
        memory.clear()
        return [], ""

    # Wire up the events: submitting the textbox first appends the user turn,
    # then generates the assistant's reply
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    clear.click(fn=clear_history, inputs=None, outputs=[chatbot, msg], queue=False)
if __name__ == "__main__":
    demo.launch()
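    # Tip: demo.launch(share=True) serves a temporary public URL if you need
    # to test from another machine.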