GPT Proto
Home/Skills/openclaw-gptproto-config

openclaw-gptproto-config

OpenClaw GPTProto model configuration assistant. Triggered when the user mentions configuring GPTProto models for OpenClaw, setting up the GPTProto API, adding a GPTProto provider, modifying OpenClaw model configuration, changing the OpenClaw default model, openclaw gptproto config, openclaw model setup, or needs to configure GPTProto models in OpenClaw. Automatically handles API key retrieval, model configuration generation, and configuration file writing.

Download for Windows

config-template.json

{
  "models": {
    "mode": "merge",
    "providers": {
      "gptproto": {
        "baseUrl": "https://gptproto.com",
        "apiKey": "<GPTPROTO_API_KEY>",
        "api": "anthropic-messages",
        "models": [
          {
            "id": "claude-opus-4-6",
            "name": "Claude Opus 4.6",
            "reasoning": true,
            "input": ["text"],
            "cost": {
              "input": 0,
              "output": 0,
              "cacheRead": 0,
              "cacheWrite": 0
            },
            "contextWindow": 200000,
            "maxTokens": 100000
          },
          {
            "id": "claude-sonnet-4-6",
            "name": "Claude Sonnet 4.6",
            "reasoning": false,
            "input": ["text"],
            "cost": {
              "input": 0,
              "output": 0,
              "cacheRead": 0,
              "cacheWrite": 0
            },
            "contextWindow": 200000,
            "maxTokens": 100000
          },
          {
            "id": "glm-5",
            "name": "GLM-5",
            "reasoning": false,
            "input": ["text"],
            "cost": {
              "input": 0,
              "output": 0,
              "cacheRead": 0,
              "cacheWrite": 0
            },
            "contextWindow": 128000,
            "maxTokens": 100000
          },
          {
            "id": "kimi-k2.5",
            "name": "Kimi K2.5",
            "reasoning": false,
            "input": ["text"],
            "cost": {
              "input": 0,
              "output": 0,
              "cacheRead": 0,
              "cacheWrite": 0
            },
            "contextWindow": 128000,
            "maxTokens": 100000
          },
          {
            "id": "MiniMax-M2.5",
            "name": "MiniMax M2.5",
            "reasoning": false,
            "input": ["text"],
            "cost": {
              "input": 0,
              "output": 0,
              "cacheRead": 0,
              "cacheWrite": 0
            },
            "contextWindow": 128000,
            "maxTokens": 100000
          }
        ]
      },
      "gptproto-openai": {
        "baseUrl": "https://gptproto.com/v1",
        "apiKey": "<GPTPROTO_API_KEY>",
        "api": "openai-completions",
        "models": [
          {
            "id": "gpt-4o",
            "name": "GPT-4o",
            "reasoning": false,
            "input": ["text"],
            "cost": {
              "input": 0,
              "output": 0,
              "cacheRead": 0,
              "cacheWrite": 0
            },
            "contextWindow": 128000,
            "maxTokens": 16384
          },
          {
            "id": "gpt-5.4",
            "name": "GPT-5.4",
            "reasoning": false,
            "input": ["text"],
            "cost": {
              "input": 0,
              "output": 0,
              "cacheRead": 0,
              "cacheWrite": 0
            },
            "contextWindow": 1050000,
            "maxTokens": 128000
          }
        ]
      }
    }
  },
  "agents": {
    "defaults": {
      "model": {
        "primary": "gptproto/claude-opus-4-6"
      },
      "models": {
        "gptproto/claude-opus-4-6": {},
        "gptproto/claude-sonnet-4-6": {},
        "gptproto/glm-5": {},
        "gptproto/kimi-k2.5": {},
        "gptproto/MiniMax-M2.5": {},
        "gptproto-openai/gpt-4o": {},
        "gptproto-openai/gpt-5.4": {}
      }
    }
  },
  "gateway": { "mode": "local" }
}