2.1 Installing the libraries (using OpenAI as the example)

pip install langchain
pip install openai
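
Before the examples below will run, the OpenAI wrapper needs an API key, which it reads from the OPENAI_API_KEY environment variable. A minimal setup sketch (the key string is a placeholder):

import os

# LangChain's OpenAI wrapper picks the key up from this variable;
# replace the placeholder with your own key.
os.environ["OPENAI_API_KEY"] = "sk-..."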

2.2 QA task

# Create the model
from langchain.llms import OpenAI
llm = OpenAI(temperature=0.9)

# Usage example
text = "What is the result of 5+6?"
print(llm(text))  # returns 11
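
A temperature of 0.9 makes the sampling fairly random; for factual or arithmetic questions like the one above, a lower temperature usually gives more reproducible answers. A minimal sketch:

# temperature=0 makes the completion (nearly) deterministic,
# which suits factual questions better than temperature=0.9.
llm_deterministic = OpenAI(temperature=0)
print(llm_deterministic("What is the result of 5+6?"))  # expected: 11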

2.3 Setting up input prompt templates

from langchain.prompts import PromptTemplate
prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
print(prompt.format(product="colorful socks")) 
# returns: What is a good name for a company that makes colorful socks?

text = prompt.format(product="colorful socks")
print(llm(text)) 
# returns: Socktastic!
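
The template and the model can also be wired together with an LLMChain, so the formatting step is handled for you. A minimal sketch using the same prompt and llm as above:

from langchain import LLMChain

# The chain formats the template with the input, then calls the LLM.
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run("colorful socks"))  # e.g. "Socktastic!"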

2.4 Memory: record the interaction history and condition the model's next prediction on it

from langchain import OpenAI, ConversationChain
llm = OpenAI(temperature=0)
conversation = ConversationChain(llm=llm, verbose=True)
conversation.predict(input="Hi there!") 
# the output is as follows
#> Entering new ConversationChain chain...
#Prompt after formatting:
#The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific
#details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

#Current conversation:

#Human: Hi there!
#AI:

#> Finished chain.
# Out[53]: " Hi there! It's nice to meet you. How can I help you today?"
conversation.predict(input="I'm doing well! Just having a conversation with an AI.") 
# the output is as follows
#Prompt after formatting:
#The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific
#details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

#Current conversation:

#Human: Hi there!
#AI:  Hi there! It's nice to meet you. How can I help you today?
#Human: I'm doing well! Just having a conversation with an AI.
#AI:

#> Finished chain.
#Out[54]: " That's great! It's always nice to have a conversation with someone new. What would you like to talk about?"
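
ConversationChain keeps the full transcript in a buffer memory by default; the memory object can also be passed in explicitly. A sketch that keeps only the last k exchanges in the prompt (k=2 is an arbitrary choice):

from langchain.chains.conversation.memory import ConversationBufferWindowMemory

# Only the last 2 human/AI exchanges are injected into the prompt.
conversation = ConversationChain(
    llm=llm,
    memory=ConversationBufferWindowMemory(k=2),
    verbose=True,
)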

2.5 Using a local model (downloaded from the Hugging Face Hub)

from langchain import PromptTemplate, LLMChain
from langchain.llms import HuggingFacePipeline
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Create the local model
model_id = "google/flan-t5-xxl"
model_task = "text2text-generation"
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, cache_dir='./')
tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir='./')
pipe = pipeline(
    model_task,
    model=model,
    tokenizer=tokenizer,
    max_length=200
)
llm = HuggingFacePipeline(pipeline=pipe)
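
As an alternative to downloading the weights, the same model can be queried through the hosted Hugging Face Hub inference API via the HuggingFaceHub wrapper. This sketch assumes the HUGGINGFACEHUB_API_TOKEN environment variable is set:

from langchain import HuggingFaceHub

# Calls the hosted inference API instead of running the model locally.
hub_llm = HuggingFaceHub(
    repo_id="google/flan-t5-xxl",
    model_kwargs={"temperature": 0.5, "max_length": 200},
)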

# Create the conversation chain (LLMChain requires a prompt;
# the window memory fills the {history} slot with the recent turns)
prompt = PromptTemplate(
    input_variables=["history", "human_input"],
    template="{history}\nHuman: {human_input}\nAI:",
)
chat_chain = LLMChain(
    llm=llm, prompt=prompt, verbose=True,
    memory=ConversationBufferWindowMemory(k=2),
)
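
A quick usage sketch for the chain above (the inputs are illustrative):

# First turn: {history} is empty, so the model sees only the greeting.
print(chat_chain.predict(human_input="Hi there, my name is Sam!"))
# Second turn: the previous exchange is injected through {history}.
print(chat_chain.predict(human_input="What is my name?"))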