/v1/chat/completions
Creates a model response for the given chat conversation. The examples on this page use GPT-4o, OpenAI's most advanced multimodal model.
Set to `application/json`.
Bearer token authentication with your OpenAI API key.
ID of the model to use.
Example: gpt-4o
Nucleus sampling parameter (top_p): the model considers only the tokens comprising the top_p probability mass. An alternative to sampling with temperature.
Defaults to: 1
Example: 0.9
A list of messages comprising the conversation so far.
The maximum number of tokens to generate.
Example: 150
Sampling temperature, between 0 and 2. Lower values make the output more focused and deterministic; higher values make it more random and creative.
Defaults to: 1
Example: 0.7
Unique identifier for the chat completion.
The model used for completion.
Usage statistics for the completion request.
Object type, always "chat.completion".
List of completion choices returned by the model.
Unix timestamp (in seconds) of when the chat completion was created.
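Taken together, the response fields above correspond to roughly the following shape. This is a simplified sketch for orientation, not the SDK's official type definitions; the nested message and usage fields follow the example response shown further below.

// Simplified sketch of the response shape described above.
// Not the official openai SDK types; it only covers the fields documented here.
interface ChatCompletionResponseSketch {
  id: string;                  // Unique identifier, e.g. "chatcmpl-123"
  object: "chat.completion";   // Object type, always "chat.completion"
  created: number;             // Unix timestamp (seconds) of creation
  model: string;               // Model used for the completion, e.g. "gpt-4o"
  choices: Array<{
    index: number;
    message: { role: "assistant"; content: string };
    finish_reason: string;     // e.g. "stop"
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}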
curl https://api.openai.com/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_OPENAI_API_KEY" \
-d '{
"model": "gpt-4o",
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is quantum computing?"}
],
"max_tokens": 150,
"temperature": 0.7
}'
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
async function main() {
const completion = await openai.chat.completions.create({
messages: [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is quantum computing?"}
],
model: "gpt-4o",
max_tokens: 150,
temperature: 0.7,
});
console.log(completion.choices[0].message.content);
}
main();
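The examples above leave out the nucleus sampling parameter (top_p) documented in the request body. A minimal variant of the same call that sets top_p instead of temperature might look like this (a sketch, assuming the same openai client and the same async main() context as the example above):

// Same request as above, but using the nucleus sampling parameter (top_p)
// instead of temperature, per the parameter descriptions in this reference.
// Place this inside an async function such as main() above.
const completionWithTopP = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "What is quantum computing?" },
  ],
  max_tokens: 150,
  top_p: 0.9, // example value from the parameter description above
});
console.log(completionWithTopP.choices[0].message.content);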
{
"id": "chatcmpl-123",
"object": "chat.completion",
"created": 1677652288,
"model": "gpt-4o",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Quantum computing is a revolutionary technology that uses quantum mechanics principles to process information. Unlike classical computers that use bits (0 or 1), quantum computers use quantum bits or 'qubits' that can exist in multiple states simultaneously through superposition."
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 20,
"completion_tokens": 45,
"total_tokens": 65
}
}
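Once the response comes back, the documented fields can be read directly off the completion object. A minimal sketch, continuing the Node.js example above:

// Reading the documented response fields from the object returned by
// openai.chat.completions.create(...) in the example above.
const choice = completion.choices[0];
console.log(completion.id);                  // e.g. "chatcmpl-123"
console.log(completion.model);               // e.g. "gpt-4o"
console.log(choice.finish_reason);           // "stop" when the model completed normally
console.log(choice.message.content);         // the assistant's reply text
console.log(completion.usage?.total_tokens); // prompt_tokens + completion_tokens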