RAG Demo, Part 2

Date: 2024-11-04 14:02:10
# Load the LLM through Ollama (there are better options than Ollama, but it is what my budget allows)
from langchain_community.llms import Ollama
llm = Ollama(model="qwen1_5-4b-chat-q2_k")
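Before wiring the model into a chain, a quick smoke test confirms the Ollama server can actually serve it (a minimal sketch, assuming qwen1_5-4b-chat-q2_k has already been pulled with ollama pull):

# Should print a short completion if the Ollama server is running
print(llm.invoke("Introduce yourself in one sentence."))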


#pip install langchain_ollama -i https://pypi.tuna.tsinghua.edu.cn/simple
#OllamaEmbeddings needs base_url even for a local server; the Ollama LLM class works locally without it
from langchain_ollama import OllamaEmbeddings
embeddings = OllamaEmbeddings(model="lrs33/bce-embedding-base_v1",base_url="http://localhost:11434/")
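To verify the embedding model is reachable, embed one query and check the vector length; bce-embedding-base_v1 should produce 768-dimensional vectors:

# Embed a single query string; the result is a list of floats
vec = embeddings.embed_query("test query")
print(len(vec))  # expected 768 for bce-embedding-base_v1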


#pip install -qU langchain-postgres -i https://pypi.tuna.tsinghua.edu.cn/simple
from langchain_postgres import PGVector

CONNECTION_STRING = "postgresql+psycopg2://postgres:qaz142434@192.168.159.130:5432/postgres"
# Name of the collection in the vector store
COLLECTION_NAME = "yaofang_test"
# Connect to the database, creating the "client"
vectorstore = PGVector(
    collection_name=COLLECTION_NAME,
    connection=CONNECTION_STRING,
    embeddings=embeddings,
)
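This part assumes the yaofang_test collection was already populated in part 1; for completeness, here is a hypothetical sketch of how documents could be written into it (the example content is made up):

from langchain_core.documents import Document

# Hypothetical documents -- the real ingestion happened in part 1
docs = [
    Document(page_content="The singleton pattern ensures a class has only one instance.",
             metadata={"source": "demo"}),
]
vectorstore.add_documents(docs)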

# Configure retrieval: plain similarity search, returning the 6 nearest documents
retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 6})
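The retriever can also be invoked on its own, which is handy for checking what the top-6 hits look like before the LLM ever sees them:

# Returns a list of Document objects, nearest first
docs = retriever.invoke("java 共有多少种设计模式")
for d in docs:
    print(d.page_content[:80])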
# A prompt template with two variables, context and question
template = """Answer the question based on the following context:
{context}
Question: {question}
"""
# Generate the prompt from the template
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_template(template)
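Rendering the template once with dummy values shows the exact prompt text the LLM will receive:

msg = prompt.invoke({"context": "example context", "question": "example question"})
print(msg.to_string())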


# Create an output parser that returns the model output as a plain string
from langchain_core.output_parsers import StrOutputParser
output_parser = StrOutputParser()
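StrOutputParser normalizes model output to a plain string: a chat message's content is extracted, while a string (which is what the Ollama LLM class already returns) passes through unchanged:

from langchain_core.messages import AIMessage

print(output_parser.invoke(AIMessage(content="hello")))  # -> hello
print(output_parser.invoke("hello"))                     # -> hello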

# Fan the input out in parallel: the retriever fills "context" while the raw
# question passes straight through. (The dict literal inside rag_chain below is
# LCEL shorthand for the same RunnableParallel; this object is built here for
# illustration.)
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
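Invoking the parallel runnable directly shows the dict it produces, whose keys match the two template variables:

# "context" is a list of Documents here; rag_chain below additionally pipes
# them through format_docs to turn them into a single string
result = setup_and_retrieval.invoke("java 共有多少种设计模式")
print(result["question"], len(result["context"]))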

# Join the retrieved documents into one context string for the prompt
def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt         # fill the template with context and question
    | llm            # generate the answer
    | output_parser  # return it as a plain string
)
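When streaming is not needed, the same chain can be called synchronously:

# Blocks until the full answer is generated, then returns it as one string
answer = rag_chain.invoke("java 共有多少种设计模式")
print(answer)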

# Stream the output
for chunk in rag_chain.stream("java 共有多少种设计模式"):
    print(chunk, end="", flush=True)

Output:

The flow of this code: create the vector database client, connect to the corresponding collection, retrieve the top-k records whose vectors are closest to the question, and have the LLM reason out an answer and stream it.

Together with part 1, this completes a simple RAG demo. Of course, it is still a long way from a real RAG system; I will build further features on this skeleton, starting with a UI at the very least. I am still torn between a Java API approach and Python Gradio.