Tools (Function calling)
Use tools to let the model call your backend functions. The model decides when to call a tool, streams arguments if needed, and you post the tool result back as a tool message linked via tool_call_id. Then you ask the model to continue with the new context.
Endpoint:
POST https://api.aifoundryhub.com/v1/chat/completions
How it works (flow)
Section titled “How it works (flow)”
1. Define tools — Provide one or more functions with a JSON-Schema `parameters` object.
2. Ask the model — Send `messages` and `tools` with `tool_choice: "auto"` (or force a specific tool).
3. Model returns a tool call — Read `choices[0].message.tool_calls[]` → `{ id, type: 'function', function: { name, arguments } }`.
4. Run your function — Execute the real function in your code; capture the result string/JSON.
5. Return a tool message — Append `{ role: 'tool', content: <result>, tool_call_id: <id> }` to `messages`.
6. Continue the chat — Call the endpoint again; the model will use the tool output to finish.
Example — call a weather tool
Section titled “Example — call a weather tool”import OpenAI from "openai";
const client = new OpenAI({ apiKey: process.env.AI_FOUNDRY_HUB_API_KEY, baseURL: "https://api.aifoundryhub.com/v1",});
const tools = [ { type: "function", function: { name: "get_current_weather", description: "Get current weather for a city", parameters: { type: "object", properties: { location: { type: "string", description: "City or location name" }, unit: { type: "string", enum: ["celsius", "fahrenheit"] } }, required: ["location", "unit"] } } }];
const messages = [ { role: "system", content: "You can call functions to help the user." }, { role: "user", content: "Weather in Moscow in celsius?" }];
// 1) Ask model; it may return a tool calllet run = await client.chat.completions.create({ model: "gpt-4.1", messages, tools, tool_choice: "auto",});
const msg = run.choices[0].message;if (msg.tool_calls?.length) { // Always include the assistant message that requested tool(s) messages.push({ role: "assistant", content: msg.content ?? "", tool_calls: msg.tool_calls });
for (const call of msg.tool_calls) { const args = JSON.parse(call.function.arguments || "{}"); // 2) Execute your function — mock implementation here const result = `It is 20°C and clear in ${args.location}.`;
// 3) Return tool result linked by tool_call_id messages.push({ role: "tool", tool_call_id: call.id, content: result }); }
// 4) Continue the chat with tool outputs run = await client.chat.completions.create({ model: "gpt-4.1", messages, });}
console.log(run.choices[0].message.content?.[0]?.text ?? run.choices[0].message.content);import os, jsonfrom openai import OpenAI
client = OpenAI( api_key=os.getenv("AI_FOUNDRY_HUB_API_KEY"), base_url="https://api.aifoundryhub.com/v1",)
tools = [ { "type": "function", "function": { "name": "get_current_weather", "description": "Get current weather info in a specified location", "parameters": { "type": "object", "properties": { "location": {"type": "string", "description": "City or location name"}, "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, }, "required": ["location", "unit"], }, }, }]
messages = [ {"role": "system", "content": "You are an assistant that can call tools."}, {"role": "user", "content": "What's the weather in Moscow? I'd like it in Celsius."},]
run = client.chat.completions.create( model="gpt-4.1", messages=messages, tools=tools, tool_choice="auto",)
assistant = run.choices[0].messageif assistant.tool_calls: # Include the assistant message that requested the tool messages.append({ "role": "assistant", "content": assistant.content, "tool_calls": assistant.tool_calls, })
for call in assistant.tool_calls: args = json.loads(call.function.arguments or "{}") # Simulate tool execution result = f"It's currently 20°C and clear in {args.get('location','?')}" # Post tool result with the tool_call_id messages.append({ "role": "tool", "tool_call_id": call.id, "content": result, })
# Ask the model to continue with the tool outputs followup = client.chat.completions.create( model="gpt-4.1", messages=messages, ) print(followup.choices[0].message.content[0].text)else: print(assistant.content)package main
import ( "context" "encoding/json" "fmt" "os"
openai "github.com/openai/openai-go" "github.com/openai/openai-go/option")
func main() { ctx := context.Background()
// Create a client with a custom base URL and API key. client := openai.NewClient( option.WithAPIKey(os.Getenv("AI_FOUNDRY_HUB_API_KEY")), option.WithBaseURL("https://api.aifoundryhub.com/v1"), )
question := "What is the weather in New York City?"
print("> ") println(question)
params := openai.ChatCompletionNewParams{ Messages: []openai.ChatCompletionMessageParamUnion{ openai.UserMessage(question), }, Tools: []openai.ChatCompletionToolParam{ { Function: openai.FunctionDefinitionParam{ Name: "get_weather", Description: openai.String("Get weather at the given location"), Parameters: openai.FunctionParameters{ "type": "object", "properties": map[string]interface{}{ "location": map[string]string{ "type": "string", }, }, "required": []string{"location"}, }, }, }, }, Seed: openai.Int(0), Model: "gemini-2.5-pro", }
// Make initial chat completion request completion, err := client.Chat.Completions.New(ctx, params) if err != nil { panic(err) }
toolCalls := completion.Choices[0].Message.ToolCalls // Return early if there are no tool calls if len(toolCalls) == 0 { fmt.Printf("No function call") return }
// If there is a was a function call, continue the conversation params.Messages = append(params.Messages, completion.Choices[0].Message.ToParam()) for _, toolCall := range toolCalls { if toolCall.Function.Name == "get_weather" { // Extract the location from the function call arguments var args map[string]interface{} err := json.Unmarshal([]byte(toolCall.Function.Arguments), &args) if err != nil { panic(err) } location := args["location"].(string)
// Simulate getting weather data weatherData := getWeather(location)
// Print the weather data fmt.Printf("Weather in %s: %s\n", location, weatherData)
params.Messages = append(params.Messages, openai.ToolMessage(weatherData, toolCall.ID)) } }
completion, err = client.Chat.Completions.New(ctx, params) if err != nil { panic(err) }
println(completion.Choices[0].Message.Content)}
// Mock function to simulate weather data retrievalfunc getWeather(location string) string { // In a real implementation, this function would call a weather API return "Sunny, 25°C"}Request parameters (tool‑specific)
Section titled “Request parameters (tool‑specific)”

Returns
Section titled “Returns”A chat.completion object. When tools are used, the assistant message contains tool_calls[]. After you send role: "tool" messages and call again, the assistant will respond with natural language.
{
  "choices": [
    {
      "message": {
        "role": "assistant",
        "content": [],
        "tool_calls": [
          {
            "index": 0,
            "id": "call_abc123",
            "type": "function",
            "function": {
              "name": "get_current_weather",
              "arguments": "{\"location\":\"Moscow\",\"unit\":\"celsius\"}"
            }
          }
        ]
      }
    }
  ]
}

Note on streaming: when `stream: true`, function arguments may arrive incrementally in `chat.completion.chunk` deltas under `choices[].delta.tool_calls[].function.arguments`. Concatenate the chunks to build the full JSON, then parse.