import json

from openai import OpenAI  # Using the OpenAI SDK as a common example


# --- Step 1: Define the actual function in your code ---
def get_current_weather(location: str, unit: str = "fahrenheit", detailed: bool = False) -> dict:
    """Get the current weather for a given location.

    In a real app, this would call an external weather API; here it
    returns mock data for demonstration.

    Args:
        location: The city and state, e.g. "San Francisco, CA".
        unit: Temperature unit to report ("fahrenheit" or "celsius").
        detailed: When True, also include humidity, wind speed and a
            short forecast.

    Returns:
        A dict with at least "temperature", "unit" and "condition" keys.
    """
    print(f"-> Calling get_current_weather for {location} ({unit}), detailed={detailed}")

    # Placeholder: mock data for demonstration.
    # Substring match so that schema-conforming inputs like
    # "San Francisco, CA" still hit the San Francisco branch
    # (an exact == comparison would miss them).
    if "San Francisco" in location:
        base_data = {"temperature": "60", "unit": unit, "condition": "Cloudy"}
    else:
        base_data = {"temperature": "72", "unit": unit, "condition": "Sunny"}

    # Add detailed information if requested
    if detailed:
        base_data.update({
            "humidity": "70%",
            "wind_speed": "10 mph",
            "forecast": "Partly cloudy in the evening",
        })
    return base_data


# --- Step 2: Define the function schema for the LLM ---
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    # Expose the function's `unit` parameter to the model;
                    # without this entry the model could never request Celsius.
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "Temperature unit to report",
                    },
                    "detailed": {
                        "type": "boolean",
                        "description": (
                            "Whether to provide detailed weather information "
                            "including humidity, wind speed, and forecast"
                        ),
                    },
                },
                "required": ["location"],
            },
        },
    }
]

model = ""  # Or another function calling model


# --- Step 3: Main logic to interact with the LLM ---
def run_conversation():
    """Run one tool-calling round trip against the configured endpoint.

    Sends the user question, lets the model decide whether to call
    ``get_current_weather``, executes any requested tool calls locally,
    feeds the results back, and returns the model's final text answer.

    Returns:
        The assistant's final response content (str), or the direct
        text reply if the model chose not to call any tool.
    """
    # Initialize your LLM client (e.g., OpenAI, Gemini, Mistral)
    client = OpenAI(
        base_url="http://127.0.0.1:8080/v1", api_key=""
    )  # Replace with your actual client setup

    # Initial user message
    messages = [{"role": "user", "content": "What's the detailed weather like in San Francisco today?"}]

    # First request: Ask the model if it needs to call a function
    response = client.chat.completions.create(
        model=model,  # Or another function calling model
        messages=messages,
        tools=tools,
        tool_choice="auto",
    )
    response_message = response.choices[0].message
    tool_calls = response_message.tool_calls

    # Check if the model decided to call a function
    if tool_calls:
        print("Tools called!")
        # Step 4: Execute the function in your application
        messages.append(response_message)  # Extend conversation with the function call request

        # Registry of locally callable tools (hoisted out of the loop —
        # it is invariant across tool calls).
        available_functions = {"get_current_weather": get_current_weather}

        for tool_call in tool_calls:
            function_name = tool_call.function.name
            function_args = json.loads(tool_call.function.arguments)

            # Call the local function dynamically
            function_to_call = available_functions[function_name]
            function_response = function_to_call(**function_args)

            # Step 5: Send the function output back to the model
            messages.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": function_name,
                    "content": json.dumps(function_response),
                }
            )

        # Second request: Get the final answer from the model
        final_response = client.chat.completions.create(
            model=model,
            messages=messages,
        )
        return final_response.choices[0].message.content
    else:
        # Model returned a text response directly
        return response_message.content


# --- Run the example ---
if __name__ == "__main__":
    weather_report = run_conversation()
    print("\n--- Final Answer ---")
    print(weather_report)