Building a multi-functional agent with the openai and LlamaIndex libraries [Datawhale study notes]
Reference: GitHub - datawhalechina/wow-agent: A simple and trans-platform agent framework and tutorial

Table of Contents
I. Building an arithmetic agent with LlamaIndex
Note: configure your API key in a .env file
1. Build the LLM (Zhipu glm-4-flash)
2. Build the arithmetic tools the agent will use
3. Complete code
II. Connecting multiple models to the agent
Real-time weather with LlamaIndex and OpenWeatherMap
1. Building the LLM is the same as in section I; this part focuses on the second half
2. OpenWeatherMap looks up weather by longitude and latitude, so the city name must first be converted to coordinates
3. Then use the coordinates to fetch the weather
4. Complete code
Results
III. Connecting a database to the agent
1. Install Ollama locally
Run results
2. A model connected to the database
First create the database and insert some data
If you're using PyCharm, the following method gives a visual view of the table data
Notes:
3. Use the locally deployed Ollama model as the agent's LLM
Note: these libraries must be installed in advance
Results
IV. Connecting RAG to the agent
1. First connect the local model
2. Build the index
3. Build the query engine
4. Complete code to try out RAG
Note: save the following content as 问答手册.txt in the working directory (for the model to learn from)
Run results
5. Use RAG as a tool for the agent
Complete code
Results
V. Connecting a search engine to the agent
1. Connect the model
2. Build the LLM
3. Configure the search engine
4. Complete code
Results
I. Building an arithmetic agent with LlamaIndex
Note: configure your API key in a .env file.
For how to apply for a free Zhipu API key, see the CSDN post 利用openai,Llama-index实现agent.
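A minimal .env file might look like the following; the variable name matches what the code below reads, and the value is only a placeholder:

# .env (placeholder value; substitute your own Zhipu API key)
ZHIPU_API_KEY=your_zhipu_api_key_here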
1. Build the LLM (Zhipu glm-4-flash)
Since this walkthrough relies on the ReAct framework, enabling streaming responses makes the model's step-by-step reasoning easier to observe.
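The imports below assume the following packages are installed (a likely minimal set; the llama-index package layout can differ across versions):

pip install llama-index openai python-dotenv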
import os
from typing import Any, Generator

from dotenv import load_dotenv
from openai import OpenAI
from pydantic import Field  # Field is used to define field metadata on the Pydantic model
from llama_index.core.llms import (
    CustomLLM,
    CompletionResponse,
    LLMMetadata,
)
from llama_index.core.llms.callbacks import llm_completion_callback

# Load environment variables
load_dotenv()

# Read the API key from the environment
api_key = os.getenv('ZHIPU_API_KEY')
base_url = "https://open.bigmodel.cn/api/paas/v4"
chat_model = "glm-4-flash"

# Define OurLLM, which inherits from the CustomLLM base class
class OurLLM(CustomLLM):
    api_key: str = Field(default=api_key)
    base_url: str = Field(default=base_url)
    model_name: str = Field(default=chat_model)
    client: OpenAI = Field(default=None, exclude=True)  # declare the client field explicitly

    def __init__(self, api_key: str, base_url: str, model_name: str = chat_model, **data: Any):
        super().__init__(**data)
        self.api_key = api_key
        self.base_url = base_url
        self.model_name = model_name
        # Initialize the client with the given api_key and base_url
        self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)

    @property
    def metadata(self) -> LLMMetadata:
        """Get LLM metadata."""
        return LLMMetadata(
            model_name=self.model_name,
        )

    @llm_completion_callback()
    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        response = self.client.chat.completions.create(
            model=self.model_name,
            messages=[{"role": "user", "content": prompt}],
        )
        if hasattr(response, 'choices') and len(response.choices) > 0:
            # Note: choices is a list, so the first element must be indexed
            response_text = response.choices[0].message.content
            return CompletionResponse(text=response_text)
        else:
            raise Exception(f"Unexpected response format: {response}")

    @llm_completion_callback()
    def stream_complete(self, prompt: str, **kwargs: Any) -> Generator:
        response = self.client.chat.completions.create(
            model=self.model_name,
            messages=[{"role": "user", "content": prompt}],
            stream=True
        )
        try:
            for chunk in response:
                chunk_message = chunk.choices[0].delta
                if not chunk_message.content:
                    continue
                content = chunk_message.content
                yield CompletionResponse(text=content, delta=content)
        except Exception as e:
            raise Exception(f"Unexpected response format: {e}")

llm = OurLLM(api_key=api_key, base_url=base_url, model_name=chat_model)
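Before wiring the wrapper into an agent, a quick sanity check like the one below should print one plain reply and one streamed reply (the prompts here are just illustrative):

# Hypothetical smoke test exercising both completion paths
print(llm.complete("Say hello in one short sentence.").text)
for part in llm.stream_complete("Count from 1 to 5."):
    print(part.delta, end="")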
2. Build the arithmetic tools the agent will use
FunctionTool.from_defaults derives each tool's name and description from the function's signature and docstring, which is what lets the ReAct agent decide when to call it.
import sys
import os

# Make the project root importable (as in the original tutorial layout)
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))

from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool

def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b

def add(a: float, b: float) -> float:
    """Add two numbers and return the sum."""
    return a + b

def main():
    multiply_tool = FunctionTool.from_defaults(fn=multiply)
    add_tool = FunctionTool.from_defaults(fn=add)
    # Create the ReActAgent instance with the two tools
    agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
    response = agent.chat("What is 20 + (2 * 4)? Use the tools to compute each step.")
    print(response)

if __name__ == "__main__":
    main()

3. Complete code
import os
import sys
from typing import Any, Generator

from dotenv import load_dotenv
from openai import OpenAI
from pydantic import Field  # Field is used to define field metadata on the Pydantic model
from llama_index.core.llms import (
    CustomLLM,
    CompletionResponse,
    LLMMetadata,
)
from llama_index.core.llms.callbacks import llm_completion_callback
from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool

# Make the project root importable (as in the original tutorial layout)
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))

# Load environment variables
load_dotenv()

# Read the API key from the environment
api_key = os.getenv('ZHIPU_API_KEY')
base_url = "https://open.bigmodel.cn/api/paas/v4"
chat_model = "glm-4-flash"

# Define OurLLM, which inherits from the CustomLLM base class
class OurLLM(CustomLLM):
    api_key: str = Field(default=api_key)
    base_url: str = Field(default=base_url)
    model_name: str = Field(default=chat_model)
    client: OpenAI = Field(default=None, exclude=True)  # declare the client field explicitly

    def __init__(self, api_key: str, base_url: str, model_name: str = chat_model, **data: Any):
        super().__init__(**data)
        self.api_key = api_key
        self.base_url = base_url
        self.model_name = model_name
        # Initialize the client with the given api_key and base_url
        self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)

    @property
    def metadata(self) -> LLMMetadata:
        """Get LLM metadata."""
        return LLMMetadata(
            model_name=self.model_name,
        )

    @llm_completion_callback()
    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        response = self.client.chat.completions.create(
            model=self.model_name,
            messages=[{"role": "user", "content": prompt}],
        )
        if hasattr(response, 'choices') and len(response.choices) > 0:
            # Note: choices is a list, so the first element must be indexed
            response_text = response.choices[0].message.content
            return CompletionResponse(text=response_text)
        else:
            raise Exception(f"Unexpected response format: {response}")

    @llm_completion_callback()
    def stream_complete(self, prompt: str, **kwargs: Any) -> Generator:
        response = self.client.chat.completions.create(
            model=self.model_name,
            messages=[{"role": "user", "content": prompt}],
            stream=True
        )
        try:
            for chunk in response:
                chunk_message = chunk.choices[0].delta
                if not chunk_message.content:
                    continue
                content = chunk_message.content
                yield CompletionResponse(text=content, delta=content)
        except Exception as e:
            raise Exception(f"Unexpected response format: {e}")

llm = OurLLM(api_key=api_key, base_url=base_url, model_name=chat_model)

def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b

def add(a: float, b: float) -> float:
    """Add two numbers and return the sum."""
    return a + b

def main():
    multiply_tool = FunctionTool.from_defaults(fn=multiply)
    add_tool = FunctionTool.from_defaults(fn=add)
    # Create the ReActAgent instance with the two tools
    agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
    response = agent.chat("What is 20 + (2 * 4)? Use the tools to compute each step.")
    print(response)

if __name__ == "__main__":
    main()

Run results with Zhipu glm-4-flash:
https://i-blog.csdnimg.cn/direct/9e5f4f36860a4f728a802edbeee6fd4d.png
Run results with GPT-4o:
https://i-blog.csdnimg.cn/direct/f9dc346313da413dbf7fa296e837bb99.png
Comparing the two runs, the outputs look much the same; on a task this simple the models are hard to tell apart.
II. Connecting multiple models to the agent
Real-time weather with LlamaIndex and OpenWeatherMap
For details on using OpenWeatherMap, see the CSDN post OpenWeatherMap API指南.
1. Building the LLM is the same as in section I, so this part focuses on the second half.
(First register on the OpenWeatherMap site and apply for an API key under "Members", then store the key in the .env file.)
That is: OpenWeatherMap_API_KEY=
2. OpenWeatherMap looks up weather by longitude and latitude, so the city name must first be converted to coordinates.
import requests

def get_coordinates(city: str) -> dict:
    api_key = os.getenv('OpenWeatherMap_API_KEY')
    # OpenWeatherMap API endpoint for current weather
    url = f"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={api_key}"
    try:
        # Send a GET request to the OpenWeatherMap API
        response = requests.get(url)
        # Check if the response is successful (status code 200)
        if response.status_code == 200:
            data = response.json()
            # The current-weather payload carries the city's coordinates in "coord"
            return {"lat": data["coord"]["lat"], "lon": data["coord"]["lon"]}
        else:
            return {"error": f"Request failed with status code {response.status_code}"}
    except requests.RequestException as e:
        return {"error": str(e)}
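3. Then use the coordinates to fetch the weather
A minimal sketch of this step, assuming OpenWeatherMap's documented current-weather endpoint with lat/lon query parameters; the helper name get_weather and the fields it returns are illustrative choices, not from the original post:

def get_weather(lat: float, lon: float) -> dict:
    api_key = os.getenv('OpenWeatherMap_API_KEY')
    # Current weather by coordinates; units=metric returns temperatures in Celsius
    url = (f"http://api.openweathermap.org/data/2.5/weather"
           f"?lat={lat}&lon={lon}&appid={api_key}&units=metric")
    response = requests.get(url)
    if response.status_code == 200:
        data = response.json()
        # "weather" and "main" are standard fields in the current-weather payload
        return {
            "description": data["weather"][0]["description"],
            "temperature": data["main"]["temp"],
        }
    return {"error": f"Request failed with status code {response.status_code}"}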