使用 Azure OpenAI 的 function calling
2024-06-21
使用Azure OpenAI的function calling功能,通过设置环境变量、初始化客户端、创建助手并定义函数、处理函数调用和提交函数输出,实现与Bing搜索API的集成。
为了使用 Azure OpenAI 的助手函数调用,可以按照以下步骤进行操作:
1. 设置环境变量
首先,确保环境变量已经设置好:
import os

# Register the Azure OpenAI credentials in the process environment so that
# later snippets can read them back with os.getenv().
os.environ.update(
    {
        "AZURE_OPENAI_API_KEY": "your-azure-openai-api-key",
        "AZURE_OPENAI_ENDPOINT": "your-azure-openai-endpoint",
    }
)
2. 初始化 Azure OpenAI 客户端
# Build the Azure OpenAI client.  The key and endpoint are read from the
# environment variables configured in step 1; api_version selects the
# 2024-02-15-preview REST API used throughout this tutorial.
from openai import AzureOpenAI

client = AzureOpenAI(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    api_version="2024-02-15-preview",
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT")
)
3. 创建助手并定义函数
# Create an assistant that exposes two callable tools to the model:
# getCurrentWeather (city + optional c/f unit) and getNickname (city).
# Only "location" is marked required in either schema.
assistant = client.beta.assistants.create(
    instructions="You are a weather bot. Use the provided functions to answer questions.",
    model="gpt-4-1106-preview",  # replace with your model deployment name
    tools=[
        {
            "type": "function",
            "function": {
                "name": "getCurrentWeather",
                "description": "Get the weather in location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"},
                        "unit": {"type": "string", "enum": ["c", "f"]}
                    },
                    "required": ["location"]
                }
            }
        },
        {
            "type": "function",
            "function": {
                "name": "getNickname",
                "description": "Get the nickname of a city",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {"type": "string", "description": "The city and state e.g. Los Angeles, CA"},
                    },
                    "required": ["location"]
                }
            }
        }
    ]
)
4. 读取函数调用
启动助手后,当助手触发函数调用时,可以检索运行信息:
{
  "id": "run_abc123",
  "object": "thread.run",
  "assistant_id": "asst_abc123",
  "thread_id": "thread_abc123",
  "status": "requires_action",
  "required_action": {
    "type": "submit_tool_outputs",
    "submit_tool_outputs": {
      "tool_calls": [
        {
          "id": "call_abc123",
          "type": "function",
          "function": {
            "name": "getCurrentWeather",
            "arguments": "{\"location\":\"San Francisco\"}"
          }
        },
        {
          "id": "call_abc456",
          "type": "function",
          "function": {
            "name": "getNickname",
            "arguments": "{\"location\":\"Los Angeles\"}"
          }
        }
      ]
    }
  },
  ...
}
5. 提交函数输出
根据函数调用结果,提交函数输出:
# Answer the pending tool calls: each entry pairs a tool_call_id from the
# run's required_action with the string output of the local function.
run = client.beta.threads.runs.submit_tool_outputs(
    thread_id="thread_abc123",  # replace with the actual thread ID
    run_id="run_abc123",  # replace with the actual run ID
    tool_outputs=[
        {
            "tool_call_id": "call_abc123",  # replace with the actual tool call ID
            "output": "22C",
        },
        {
            "tool_call_id": "call_abc456",  # replace with the actual tool call ID
            "output": "LA",
        },
    ]
)
提交工具输出后,运行将进入 queued 状态,然后继续执行。
使用 Bing 搜索 API 并通过函数调用将其与 Azure OpenAI 集成
为了使用 Bing 搜索 API 并通过函数调用将其与 Azure OpenAI 集成,可以按照以下步骤操作。以下是详细的步骤和代码示例:
1. 安装必要的包
首先,确保安装了所需的包:
pip install requests openai~=1.10
2. 设置参数
配置 Azure OpenAI 和 Bing 搜索资源的详细信息:
# Connection settings for the Azure OpenAI resource and the Bing Search
# resource.  Replace every <...> placeholder with your own values; these
# module-level names are read by the search() helper and the driver below.
azure_endpoint = "https://<YOUR_RESOURCE_NAME>.openai.azure.com"
api_version = "2024-02-15-preview"
aoai_api_key = "<AOAI_RESOURCE_API_KEY>"
deployment_name = "<DEPLOYMENT_NAME>"
bing_search_subscription_key = "<BING_SEARCH_SUBSCRIPTION_KEY>"
bing_search_url = "https://api.bing.microsoft.com/v7.0/search"
3. 定义 Bing 搜索函数
定义一个调用 Bing 搜索 API 的函数:
import json

import requests


def search(query: str) -> str:
    """Perform a Bing web search for *query*.

    @param query: Search query string.
    @return: JSON-encoded string: a list of objects with "title", "link"
        and "snippet" keys, one per web page result.  (The original
        annotation said ``list``, but the function returns
        ``json.dumps(...)`` — a ``str`` — which is what the assistant
        tool-output API expects.)
    @raises requests.HTTPError: if the Bing API returns an error status
        (surfaced by ``raise_for_status``).
    """
    # bing_search_subscription_key / bing_search_url are the module-level
    # settings defined in the parameters section above.
    headers = {"Ocp-Apim-Subscription-Key": bing_search_subscription_key}
    params = {"q": query, "textDecorations": False}
    response = requests.get(bing_search_url, headers=headers, params=params)
    response.raise_for_status()
    search_results = response.json()
    # Keep only the fields the model needs from each web-page hit.
    output = [
        {"title": result["name"], "link": result["url"], "snippet": result["snippet"]}
        for result in search_results["webPages"]["value"]
    ]
    return json.dumps(output)
4. 定义辅助函数
定义一些必要的辅助函数来处理运行状态、创建消息和检索消息:
import json  # needed by poll_run_till_completion; was only imported in an earlier snippet
import time
from openai import AzureOpenAI
from pathlib import Path
from typing import Optional


def poll_run_till_completion(client, thread_id, run_id, available_functions, verbose, max_steps=10, wait=3):
    """Poll a run until it completes or fails, servicing tool calls on the way.

    Polls at most ``max_steps`` times, sleeping ``wait`` seconds between
    polls.  When the run status is ``requires_action``, every requested
    function call is dispatched through ``available_functions`` (a mapping
    of function name -> callable) and the outputs are submitted back.
    Any exception is printed, not raised.
    """
    # Bug fix: the original guard used `and`, so a call missing only one of
    # the three required arguments slipped through and crashed later.
    if client is None or thread_id is None or run_id is None:
        print("Client, Thread ID and Run ID are required.")
        return
    try:
        cnt = 0
        while cnt < max_steps:
            run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
            if verbose:
                print("Poll {}: {}".format(cnt, run.status))
            cnt += 1
            if run.status == "requires_action":
                tool_responses = []
                if (
                    run.required_action.type == "submit_tool_outputs"
                    and run.required_action.submit_tool_outputs.tool_calls is not None
                ):
                    tool_calls = run.required_action.submit_tool_outputs.tool_calls
                    for call in tool_calls:
                        if call.type == "function":
                            if call.function.name not in available_functions:
                                raise Exception("Function requested by the model does not exist")
                            function_to_call = available_functions[call.function.name]
                            # Arguments arrive as a JSON string; unpack as kwargs.
                            tool_response = function_to_call(**json.loads(call.function.arguments))
                            tool_responses.append({"tool_call_id": call.id, "output": tool_response})
                run = client.beta.threads.runs.submit_tool_outputs(
                    thread_id=thread_id, run_id=run.id, tool_outputs=tool_responses
                )
            if run.status == "failed":
                print("Run failed.")
                break
            if run.status == "completed":
                break
            time.sleep(wait)
    except Exception as e:
        print(e)


def create_message(client, thread_id, role="", content="", file_ids=None, metadata=None, message_id=None):
    """Create a message on a thread, or retrieve one if ``message_id`` is given.

    Returns the created/retrieved message object, or ``None`` when required
    arguments are missing or the API call fails (the error is printed).
    """
    if metadata is None:
        metadata = {}
    if file_ids is None:
        file_ids = []
    if client is None:
        print("Client parameter is required.")
        return None
    if thread_id is None:
        print("Thread ID is required.")
        return None
    try:
        if message_id is not None:
            return client.beta.threads.messages.retrieve(thread_id=thread_id, message_id=message_id)
        # Collect the optional arguments once instead of enumerating every
        # file_ids/metadata combination in separate branches.
        extra = {}
        if file_ids:
            extra["file_ids"] = file_ids
        if metadata:
            extra["metadata"] = metadata
        return client.beta.threads.messages.create(thread_id=thread_id, role=role, content=content, **extra)
    except Exception as e:
        print(e)
        return None


def retrieve_and_print_messages(client, thread_id, verbose, out_dir=None):
    """List a thread's messages, optionally printing them and saving images.

    Text content is printed (oldest first) when ``verbose``; image content
    is written as ``<file_id>.png`` into ``out_dir`` when that directory
    exists.  Returns the messages list object, or ``None`` on bad arguments
    or API failure (the error is printed).
    """
    # Bug fix: the original guard used `and`, letting a single missing
    # argument through to crash on the API call.
    if client is None or thread_id is None:
        print("Client and Thread ID are required.")
        return None
    try:
        messages = client.beta.threads.messages.list(thread_id=thread_id)
        display_role = {"user": "User query", "assistant": "Assistant response"}
        prev_role = None
        if verbose:
            print("\n\nCONVERSATION:")
        for md in reversed(messages.data):
            if prev_role == "assistant" and md.role == "user" and verbose:
                print("------ \n")
            for mc in md.content:
                if mc.type == "text":
                    txt_val = mc.text.value
                    # Bug fix: printing moved inside the text branch — the
                    # original printed after every content item, so an
                    # image-only message reused a stale txt_val or raised
                    # NameError (swallowed by the except below).
                    if verbose:
                        if prev_role == md.role:
                            print(txt_val)
                        else:
                            print("{}:\n{}".format(display_role[md.role], txt_val))
                elif mc.type == "image_file":
                    image_data = client.files.content(mc.image_file.file_id)
                    if out_dir is not None:
                        out_dir_path = Path(out_dir)
                        if out_dir_path.exists():
                            image_path = out_dir_path / (mc.image_file.file_id + ".png")
                            with image_path.open("wb") as f:
                                f.write(image_data.read())
                prev_role = md.role
        return messages
    except Exception as e:
        print(e)
        return None
5. 创建并运行助手
定义助手,提供其功能,并运行搜索查询:
# The single tool exposed to the model; it maps onto the search() helper
# defined earlier via available_functions.
bing_search_tool = {
    "type": "function",
    "function": {
        "name": "search_bing",
        "description": "Searches bing to get up-to-date information from the web.",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query",
                }
            },
            "required": ["query"],
        },
    },
}

name = "websearch-assistant"
instructions = """You are an assistant designed to help people answer questions.

You have access to query the web using Bing Search. You should call bing search whenever a question requires up to date information or could benefit from web data.
"""
message = {"role": "user", "content": "How tall is mount rainier?"}
tools = [bing_search_tool]
available_functions = {"search_bing": search}
verbose_output = True

# Wire everything together: client -> assistant -> thread -> message -> run,
# then poll until the run finishes and print the conversation.
client = AzureOpenAI(api_key=aoai_api_key, api_version=api_version, azure_endpoint=azure_endpoint)
assistant = client.beta.assistants.create(
    name=name, description="", instructions=instructions, tools=tools, model=deployment_name
)
thread = client.beta.threads.create()
create_message(client, thread.id, message["role"], message["content"])
run = client.beta.threads.runs.create(thread_id=thread.id, assistant_id=assistant.id, instructions=instructions)
poll_run_till_completion(
    client=client,
    thread_id=thread.id,
    run_id=run.id,
    available_functions=available_functions,
    verbose=verbose_output,
)
messages = retrieve_and_print_messages(client=client, thread_id=thread.id, verbose=verbose_output)