Pomidor

Moonshot: no wait / Qwen: no wait / GLM: no wait / Cohere: no wait / Grok: no wait / Deepseek: no wait / Groq: no wait / GPT-4o Mini / 3.5 Turbo: no wait / GPT-4: no wait / GPT-4 Turbo: no wait / GPT-4o: no wait / GPT-4.1: no wait / GPT-4.1 Mini: no wait / GPT-4.1 Nano: no wait / GPT-5: no wait / GPT-5 Mini: no wait / GPT-5 Nano: no wait / GPT-5 Chat Latest: no wait / OpenAI o1: no wait / OpenAI o3 mini: no wait / OpenAI o3: no wait / OpenAI o4 mini: no wait / OpenAI Codex Mini: no wait / DALL-E: no wait / GPT Image: no wait / Claude (Sonnet): no wait / Claude (Opus): no wait / Gemini Flash: no wait / Gemini Pro: no wait / Gemini Ultra: no wait / Mistral 7B: no wait / Mistral Nemo: no wait / Mistral Medium: no wait / Mistral Large: no wait / Groq: no wait

Rentry
13.11.2025
Добавил нового провайдера с локальными моделями - Groq.
18.11.2025
Убрал у preview моделей зависимость от billing ключей. Кароч, gemini 3 должна работать.


Service Info

{
  "uptime": 128812,
  "endpoints": {
    "openai": "https://oai-proxy-5xi1.onrender.com/proxy/openai",
    "openai-image": "https://oai-proxy-5xi1.onrender.com/proxy/openai-image",
    "anthropic": "https://oai-proxy-5xi1.onrender.com/proxy/anthropic",
    "google-ai": "https://oai-proxy-5xi1.onrender.com/proxy/google-ai",
    "mistral-ai": "https://oai-proxy-5xi1.onrender.com/proxy/mistral-ai",
    "deepseek": "https://oai-proxy-5xi1.onrender.com/proxy/deepseek",
    "xai": "https://oai-proxy-5xi1.onrender.com/proxy/xai",
    "cohere": "https://oai-proxy-5xi1.onrender.com/proxy/cohere",
    "qwen": "https://oai-proxy-5xi1.onrender.com/proxy/qwen",
    "glm": "https://oai-proxy-5xi1.onrender.com/proxy/glm",
    "moonshot": "https://oai-proxy-5xi1.onrender.com/proxy/moonshot",
    "groq": "https://oai-proxy-5xi1.onrender.com/proxy/groq"
  },
  "proompts": 26076,
  "tookens": "462.19m ($1099.78)",
  "proomptersNow": 3,
  "openaiKeys": 22,
  "openaiOrgs": 20,
  "anthropicKeys": 4,
  "google-aiKeys": 86,
  "mistral-aiKeys": 31,
  "deepseekKeys": 114,
  "xaiKeys": 1,
  "cohereKeys": 13,
  "qwenKeys": 99,
  "glmKeys": 18,
  "moonshotKeys": 25,
  "groqKeys": 20,
  "turbo": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 4,
    "revokedKeys": 9,
    "Requests": 0,
    "overQuotaKeys": 9,
    "trialKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt4": {
    "usage": "11.91m (In: 11.33m, Out: 584.8k) ($374.97)",
    "activeKeys": 4,
    "Requests": 450,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt4-turbo": {
    "usage": "1.07m (In: 1.01m, Out: 58.5k) ($11.90)",
    "activeKeys": 4,
    "Requests": 63,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt4o": {
    "usage": "2.01m (In: 1.92m, Out: 88.0k) ($5.68)",
    "activeKeys": 4,
    "Requests": 209,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt41": {
    "usage": "141.4k (In: 138.9k, Out: 2.5k) ($0.30)",
    "activeKeys": 4,
    "Requests": 12,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt41-mini": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 4,
    "Requests": 0,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt41-nano": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 4,
    "Requests": 0,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt5": {
    "usage": "90.0k (In: 77.2k, Out: 12.8k) ($0.22)",
    "activeKeys": 4,
    "Requests": 8,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt5-mini": {
    "usage": "2.4k (In: 351, Out: 2.0k) ($0.00)",
    "activeKeys": 4,
    "Requests": 14,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt5-nano": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 4,
    "Requests": 0,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt5-chat-latest": {
    "usage": "515.9k (In: 492.6k, Out: 23.3k) ($0.85)",
    "activeKeys": 4,
    "Requests": 62,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "o1": {
    "usage": "8.5k (In: 2.7k, Out: 5.8k) ($0.39)",
    "activeKeys": 3,
    "Requests": 107,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "o3": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 3,
    "Requests": 0,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "o3-mini": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 3,
    "Requests": 0,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "o4-mini": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 4,
    "Requests": 0,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "codex-mini": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 2,
    "Requests": 0,
    "overQuotaKeys": 6,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "dall-e": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 3,
    "Requests": 0,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt-image": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 3,
    "Requests": 0,
    "overQuotaKeys": 9,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "claude": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 0,
    "revokedKeys": 2,
    "Requests": 0,
    "overQuotaKeys": 2,
    "trialKeys": 0,
    "prefilledKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "claude-opus": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 0,
    "revokedKeys": 2,
    "Requests": 0,
    "overQuotaKeys": 2,
    "trialKeys": 0,
    "prefilledKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gemini-flash": {
    "usage": "398.3k (In: 319.4k, Out: 78.9k) ($0.10)",
    "activeKeys": 81,
    "revokedKeys": 4,
    "Requests": 131,
    "overQuotaKeys": 1,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gemini-pro": {
    "usage": "441.13m (In: 424.25m, Out: 16.88m) ($699.10)",
    "activeKeys": 7,
    "revokedKeys": 3,
    "Requests": 24739,
    "overQuotaKeys": 65,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gemini-ultra": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 1,
    "revokedKeys": 0,
    "Requests": 0,
    "overQuotaKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-tiny": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 31,
    "revokedKeys": 0,
    "Requests": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-small": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 31,
    "revokedKeys": 0,
    "Requests": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-medium": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 31,
    "revokedKeys": 0,
    "Requests": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-large": {
    "usage": "7.0k (In: 4.9k, Out: 2.1k) ($0.02)",
    "activeKeys": 31,
    "revokedKeys": 0,
    "Requests": 1,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "deepseek": {
    "usage": "942.9k (In: 837.8k, Out: 105.1k) ($0.69)",
    "activeKeys": 91,
    "revokedKeys": 12,
    "Requests": 79,
    "overQuotaKeys": 11,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "xai": {
    "usage": "581.4k (In: 546.8k, Out: 34.6k) ($3.64)",
    "activeKeys": 1,
    "revokedKeys": 0,
    "Requests": 41,
    "overQuotaKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "cohere": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 13,
    "revokedKeys": 0,
    "Requests": 0,
    "overQuotaKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "qwen": {
    "usage": "55.6k (In: 53.2k, Out: 2.4k) ($0.10)",
    "activeKeys": 95,
    "revokedKeys": 4,
    "Requests": 9,
    "overQuotaKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "glm": {
    "usage": "2.54m (In: 2.52m, Out: 22.8k) ($1.04)",
    "activeKeys": 1,
    "revokedKeys": 1,
    "Requests": 52,
    "overQuotaKeys": 16,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "moonshot": {
    "usage": "779.7k (In: 615.4k, Out: 164.4k) ($0.78)",
    "activeKeys": 21,
    "revokedKeys": 4,
    "Requests": 99,
    "overQuotaKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "groq": {
    "usage": "0 tokens ($0.00)",
    "activeKeys": 1,
    "revokedKeys": 19,
    "Requests": 0,
    "overQuotaKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "config": {
    "allowedExpModels": "gemini-3-pro-preview",
    "gatekeeper": "proxy_key",
    "maxIpsAutoBan": "false",
    "captchaMode": "none",
    "powTokenHours": "1488",
    "powTokenMaxIps": "0",
    "powDifficultyLevel": "low",
    "powChallengeTimeout": "30",
    "textModelRateLimit": "8",
    "imageModelRateLimit": "4",
    "maxContextTokensOpenAI": "80000",
    "maxContextTokensAnthropic": "80000",
    "maxOutputTokensOpenAI": "15000",
    "maxOutputTokensAnthropic": "15000",
    "useRemoteTokenCounting": "true",
    "allowAwsLogging": "false",
    "promptLogging": "false",
    "tokenQuota": {
      "moonshot": "0",
      "qwen": "0",
      "glm": "0",
      "cohere": "0",
      "xai": "0",
      "deepseek": "0",
      "groq": "0",
      "turbo": "0",
      "gpt4": "0",
      "gpt4-32k": "0",
      "gpt4-turbo": "0",
      "gpt4o": "0",
      "gpt45": "0",
      "gpt41": "0",
      "gpt41-mini": "0",
      "gpt41-nano": "0",
      "gpt5": "0",
      "gpt5-mini": "0",
      "gpt5-nano": "0",
      "gpt5-chat-latest": "0",
      "gpt5-pro": "0",
      "o1": "0",
      "o1-mini": "0",
      "o1-pro": "0",
      "o3-pro": "0",
      "o3-mini": "0",
      "o3": "0",
      "o4-mini": "0",
      "codex-mini": "0",
      "dall-e": "0",
      "gpt-image": "0",
      "claude": "0",
      "claude-opus": "0",
      "gemini-flash": "0",
      "gemini-pro": "0",
      "gemini-ultra": "0",
      "mistral-tiny": "0",
      "mistral-small": "0",
      "mistral-medium": "0",
      "mistral-large": "0",
      "aws-claude": "0",
      "aws-claude-opus": "0",
      "aws-mistral-tiny": "0",
      "aws-mistral-small": "0",
      "aws-mistral-medium": "0",
      "aws-mistral-large": "0",
      "gcp-claude": "0",
      "gcp-claude-opus": "0",
      "azure-turbo": "0",
      "azure-gpt4": "0",
      "azure-gpt4-32k": "0",
      "azure-gpt4-turbo": "0",
      "azure-gpt4o": "0",
      "azure-gpt45": "0",
      "azure-gpt41": "0",
      "azure-gpt41-mini": "0",
      "azure-gpt41-nano": "0",
      "azure-gpt5": "0",
      "azure-gpt5-mini": "0",
      "azure-gpt5-nano": "0",
      "azure-gpt5-chat-latest": "0",
      "azure-gpt5-pro": "0",
      "azure-dall-e": "0",
      "azure-o1": "0",
      "azure-o1-mini": "0",
      "azure-o1-pro": "0",
      "azure-o3-pro": "0",
      "azure-o3-mini": "0",
      "azure-o3": "0",
      "azure-o4-mini": "0",
      "azure-codex-mini": "0",
      "azure-gpt-image": "0",
      "openrouter-paid": "0",
      "openrouter-free": "0",
      "groq-allam-2-7b": "0",
      "groq-compound": "0",
      "groq-compound-mini": "0",
      "groq-llama-4-maverick-17b-128e-instruct": "0",
      "groq-llama-4-scout-17b-16e-instruct": "0",
      "groq-llama-guard-4-12b": "0",
      "groq-llama-prompt-guard-2-22m": "0",
      "groq-llama-prompt-guard-2-86m": "0",
      "groq-llama-3.3-70b-versatile": "0",
      "groq-llama-3.1-8b-instant": "0",
      "groq-kimi-k2-instruct": "0",
      "groq-kimi-k2-instruct-0905": "0",
      "groq-gpt-oss-safeguard-20b": "0",
      "groq-gpt-oss-120b": "0",
      "groq-gpt-oss-20b": "0",
      "groq-qwen3-32b": "0"
    },
    "allowOpenAIToolUsage": "true",
    "allowedVisionServices": "openai,anthropic,google-ai,mistral-ai,aws",
    "tokensPunishmentFactor": "0",
    "serviceInfoAuthMode": "token"
  }
}