Tool Config Files

Generate ready-to-use LLM config files for all currently active free APIs

11 active providers · Last updated: 2026-03-30
LiteLLM Proxy Docs

Unified LLM proxy supporting 100+ model routes

Quick Start

  1. Get API keys

    Obtain an API key from each provider's console.

  2. Replace placeholders

    Replace all YOUR_*_API_KEY values with your real keys.

  3. Place the config file

    Save it as ./litellm-config.yaml (the filename declared below)

  4. Start the proxy

    Run litellm --config litellm-config.yaml

Filename litellm-config.yaml

Format yaml

yaml
# LiteLLM Proxy Configuration
# Generated on 2026-03-30
# Includes 11 active providers

model_list:
  - model_name: aihubmix/gpt-4.1-mini-free
    litellm_params:
      model: aihubmix/gpt-4.1-mini-free
      api_base: https://aihubmix.com/v1
      api_key: "YOUR_AIHUBMIX_API_KEY"
  - model_name: aihubmix/coding-glm-5.1-free
    litellm_params:
      model: aihubmix/coding-glm-5.1-free
      api_base: https://aihubmix.com/v1
      api_key: "YOUR_AIHUBMIX_API_KEY"
  - model_name: openrouter/step-3.5-flash:free
    litellm_params:
      model: openrouter/stepfun/step-3.5-flash:free
      api_base: https://openrouter.ai/api/v1
      api_key: "YOUR_OPENROUTER_API_KEY"
  - model_name: openrouter/minimax-m2.5:free
    litellm_params:
      model: openrouter/minimax/minimax-m2.5:free
      api_base: https://openrouter.ai/api/v1
      api_key: "YOUR_OPENROUTER_API_KEY"
  - model_name: groq/llama-3.3-70b-versatile
    litellm_params:
      model: groq/llama-3.3-70b-versatile
      api_base: https://api.groq.com/openai/v1
      api_key: "YOUR_GROQ_API_KEY"
  - model_name: groq/llama-4-scout-17b-16e-instruct
    litellm_params:
      model: groq/meta-llama/llama-4-scout-17b-16e-instruct
      api_base: https://api.groq.com/openai/v1
      api_key: "YOUR_GROQ_API_KEY"
  - model_name: cohere/command-a-03-2025
    litellm_params:
      model: cohere/command-a-03-2025
      api_base: https://api.cohere.ai/compatibility/v1
      api_key: "YOUR_COHERE_API_KEY"
  - model_name: cohere/command-r-plus
    litellm_params:
      model: cohere/command-r-plus
      api_base: https://api.cohere.ai/compatibility/v1
      api_key: "YOUR_COHERE_API_KEY"
  - model_name: huggingface/Llama-3.3-70B-Instruct
    litellm_params:
      model: huggingface/meta-llama/Llama-3.3-70B-Instruct
      api_base: https://router.huggingface.co/v1
      api_key: "YOUR_HUGGINGFACE_API_KEY"
  - model_name: huggingface/DeepSeek-V3-0324
    litellm_params:
      model: huggingface/deepseek-ai/DeepSeek-V3-0324
      api_base: https://router.huggingface.co/v1
      api_key: "YOUR_HUGGINGFACE_API_KEY"
  - model_name: cerebras/llama3.1-8b
    litellm_params:
      model: cerebras/llama3.1-8b
      api_base: https://api.cerebras.ai/v1
      api_key: "YOUR_CEREBRAS_API_KEY"
  - model_name: cerebras/qwen-3-235b-a22b-instruct-2507
    litellm_params:
      model: cerebras/qwen-3-235b-a22b-instruct-2507
      api_base: https://api.cerebras.ai/v1
      api_key: "YOUR_CEREBRAS_API_KEY"
  - model_name: sambanova/Meta-Llama-3.3-70B-Instruct
    litellm_params:
      model: sambanova/Meta-Llama-3.3-70B-Instruct
      api_base: https://api.sambanova.ai/v1
      api_key: "YOUR_SAMBANOVA_API_KEY"
  - model_name: sambanova/Meta-Llama-3.1-405B-Instruct
    litellm_params:
      model: sambanova/Meta-Llama-3.1-405B-Instruct
      api_base: https://api.sambanova.ai/v1
      api_key: "YOUR_SAMBANOVA_API_KEY"
  - model_name: github-models/gpt-4o
    litellm_params:
      model: github_models/gpt-4o
      api_base: https://models.inference.ai.azure.com
      api_key: "YOUR_GITHUB_MODELS_API_KEY"
  - model_name: github-models/Llama-3.3-70B-Instruct
    litellm_params:
      model: github_models/meta-llama/Llama-3.3-70B-Instruct
      api_base: https://models.inference.ai.azure.com
      api_key: "YOUR_GITHUB_MODELS_API_KEY"
  - model_name: bigmodel/glm-4.7-flash
    litellm_params:
      model: bigmodel/glm-4.7-flash
      api_base: https://open.bigmodel.cn/api/paas/v4
      api_key: "YOUR_BIGMODEL_API_KEY"
  - model_name: bigmodel/glm-4-flash-250414
    litellm_params:
      model: bigmodel/glm-4-flash-250414
      api_base: https://open.bigmodel.cn/api/paas/v4
      api_key: "YOUR_BIGMODEL_API_KEY"
  - model_name: ollama-cloud/deepseek-v3.2
    litellm_params:
      model: ollama_cloud/deepseek-v3.2
      api_base: https://ollama.com/v1
      api_key: "YOUR_OLLAMA_CLOUD_API_KEY"
  - model_name: ollama-cloud/kimi-k2.5
    litellm_params:
      model: ollama_cloud/kimi-k2.5
      api_base: https://ollama.com/v1
      api_key: "YOUR_OLLAMA_CLOUD_API_KEY"
  - model_name: google-ai-studio/gemini-2.5-flash-lite
    litellm_params:
      model: google_ai-studio/gemini-2.5-flash-lite
      api_base: https://generativelanguage.googleapis.com/v1beta/openai
      api_key: "YOUR_GOOGLE_AI_STUDIO_API_KEY"
  - model_name: google-ai-studio/gemini-2.5-flash
    litellm_params:
      model: google_ai-studio/gemini-2.5-flash
      api_base: https://generativelanguage.googleapis.com/v1beta/openai
      api_key: "YOUR_GOOGLE_AI_STUDIO_API_KEY"

# Global proxy behavior.
litellm_settings:
  # Drop request params the target provider does not support instead of
  # erroring — NOTE(review): per LiteLLM docs; confirm desired for all routes.
  drop_params: true
  # Quiet logging; set true temporarily when debugging routing issues.
  set_verbose: false