
LLM API call examples

This repository contains a list of working code examples for calling various LLM APIs.

README.md is the source of truth and contains all examples in curl format.

README-python.md contains the same examples in Python, and is generated automatically using GPT-3.5 whenever README.md is updated.

README-js.md contains the same examples in JavaScript, and is generated automatically using GPT-3.5 whenever README.md is updated.
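
As a rough illustration of how such an automatic conversion could work, here is a minimal sketch that sends the curl examples to the Chat Completions API and writes the translated output back out. The script name, prompt wording, and file handling are assumptions for illustration; the repository's actual generation workflow may differ.

import requests
import os

# Read the source-of-truth README containing the curl examples.
with open("README.md") as f:
    curl_examples = f.read()

# Ask GPT-3.5 to translate every curl example into Python.
response = requests.post(
    "https://api.openai.com/v1/chat/completions",
    headers={
        "Content-Type": "application/json",
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"
    },
    json={
        "model": "gpt-3.5-turbo",
        "messages": [
            {"role": "system", "content": "Translate every curl example in the following README into Python using the requests library. Keep the surrounding Markdown unchanged."},
            {"role": "user", "content": curl_examples}
        ]
    }
)

# Write the generated Python version of the README.
with open("README-python.md", "w") as f:
    f.write(response.json()["choices"][0]["message"]["content"])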

See also: List of cloud hosts for inference and fine-tuning

Table of Contents

- OpenAI
- Anthropic
- Cohere
- Mistral
- Google
- Groq

OpenAI

🔑 Get API key here.

📃 API docs.

Chat

import requests
import os

response = requests.post(
    "https://api.openai.com/v1/chat/completions",
    headers={
        "Content-Type": "application/json",
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"
    },
    json={
        "model": "gpt-3.5-turbo",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"}
        ]
    }
)
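
If the call succeeds, the assistant's reply can be read from the standard Chat Completions response:

print(response.json()["choices"][0]["message"]["content"])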

Embeddings

import requests
import os

response = requests.post(
    "https://api.openai.com/v1/embeddings",
    headers={
        "Authorization": "Bearer " + os.environ.get("OPENAI_API_KEY"),
        "Content-Type": "application/json"
    },
    json={
        "input": "The food was delicious and the wine...",
        "model": "text-embedding-ada-002",
        "encoding_format": "float"
    }
)
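
The embedding vector (a list of floats) is returned under the data field:

print(response.json()["data"][0]["embedding"])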

Anthropic

🔑 Get API key here.

📃 API docs.

Chat

import requests
import os

response = requests.post(
    "https://api.anthropic.com/v1/complete",
    headers={
        "accept": "application/json",
        "anthropic-version": "2023-06-01",
        "content-type": "application/json",
        "x-api-key": os.environ.get("ANTHROPIC_API_KEY")
    },
    json={
        "model": "claude-2.1",
        "prompt": "\n\nHuman: Hello, world!\n\nAssistant:",
        "max_tokens_to_sample": 256
    }
)
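
This example uses Anthropic's Text Completions endpoint (/v1/complete), which returns the generated text in a completion field:

print(response.json()["completion"])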

Cohere

🔑 Get API key here.

📃 API docs.

Chat

import requests
import os

response = requests.post(
    "https://api.cohere.ai/v1/chat",
    headers={
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {os.environ['COHERE_API_KEY']}"
    },
    json={
        "chat_history": [
            {"role": "USER", "message": "Who discovered gravity?"},
            {"role": "CHATBOT", "message": "The man who is widely credited with discovering gravity is Sir Isaac Newton"}
        ],
        "message": "What year was he born?",
        "connectors": [{"id": "web-search"}]
    }
)
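
Assuming the v1 Chat response shape, the generated reply is returned in the text field:

print(response.json()["text"])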

Embeddings

import requests
import os

response = requests.post(
    "https://api.cohere.ai/v1/embed",
    headers={
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {os.environ['COHERE_API_KEY']}"
    },
    json={
        "texts": [
            "hello",
            "goodbye"
        ],
        "truncate": "END"
    }
)
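
The vectors come back as a list under embeddings, one per input text:

print(response.json()["embeddings"][0])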

Mistral

🔑 Get API key here.

📃 API docs.

Chat

import requests
import os

response = requests.post(
    "https://api.mistral.ai/v1/chat/completions",
    headers={
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Authorization": f"Bearer {os.environ.get('MISTRAL_API_KEY')}"
    },
    json={
        "model": "mistral-tiny",
        "messages": [{"role": "user", "content": "Who is the most renowned French writer?"}]
    }
)
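
Mistral's API follows the OpenAI-compatible response format, so the reply is under choices:

print(response.json()["choices"][0]["message"]["content"])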

Embeddings

import requests
import os

response = requests.post(
    "https://api.mistral.ai/v1/embeddings",
    headers={
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Authorization": f"Bearer {os.environ['MISTRAL_API_KEY']}"
    },
    json={
        "model": "mistral-embed",
        "input": ["Embed this sentence.", "As well as this one."]
    }
)
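
The embeddings are likewise returned OpenAI-style, one entry per input under data:

print(response.json()["data"][0]["embedding"])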

Google

🔑 Get API key here.

📃 API docs.

Chat

import requests
import os

response = requests.post(
    "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=" + os.environ["GOOGLE_API_KEY"],
    headers={
        "Content-Type": "application/json"
    },
    json={
        "contents": [
            {
                "parts": [
                    {
                        "text": "Write a story about a magic backpack."
                    }
                ]
            }
        ]
    }
)
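
The generated text is nested inside the first candidate of the response:

print(response.json()["candidates"][0]["content"]["parts"][0]["text"])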

Embeddings

import requests
import os

response = requests.post(
    "https://generativelanguage.googleapis.com/v1beta/models/embedding-001:embedContent?key=" + os.environ["GOOGLE_API_KEY"],
    headers={
        "Content-Type": "application/json"
    },
    json={
        "model": "models/embedding-001",
        "content": {
            "parts": [
                {
                    "text": "This is a sentence."
                }
            ]
        }
    }
)
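
The embedding values are returned under embedding.values:

print(response.json()["embedding"]["values"])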

Groq

🔑 Get API key here.

📃 API docs.

Chat

import requests
import os

response = requests.post(
    "https://api.groq.com/openai/v1/chat/completions",
    headers={
        "Content-Type": "application/json",
        "Authorization": f"Bearer {os.environ['GROQ_API_KEY']}"
    },
    json={
        "model": "mixtral-8x7b-32768",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"}
        ]
    }
)
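
Groq exposes an OpenAI-compatible API, so the reply is read the same way as with OpenAI:

print(response.json()["choices"][0]["message"]["content"])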