gemini property

Google Gemini provider configuration using the OpenAI-compatible interface.

Implementation

static final OpenAICompatibleProviderConfig gemini =
    OpenAICompatibleProviderConfig(
  providerId: 'google-openai',
  displayName: 'Google Gemini (OpenAI-compatible)',
  description: 'Google Gemini models using OpenAI-compatible interface',
  defaultBaseUrl: 'https://generativelanguage.googleapis.com/v1beta/openai/',
  defaultModel: 'gemini-2.0-flash',
  supportedCapabilities: {
    LLMCapability.chat,
    LLMCapability.streaming,
    LLMCapability.toolCalling,
    LLMCapability.reasoning,
    LLMCapability.embedding,
  },
  supportsReasoningEffort: true,
  supportsStructuredOutput: true,
  parameterMappings: {
    'reasoning_effort': 'reasoning_effort', // low, medium, high
    'include_thoughts': 'include_thoughts', // Google-specific thinking config
    'thinking_budget': 'thinking_budget', // Google-specific thinking budget
  },
  // Use Google-specific transformers for thinking support
  requestBodyTransformer: GoogleRequestBodyTransformer(),
  headersTransformer: GoogleHeadersTransformer(),
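  // Per-model capability overrides (reasoning, vision, tool calling, context length)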
  modelConfigs: {
    'gemini-2.0-flash': ModelCapabilityConfig(
      supportsReasoning: false,
      supportsVision: true,
      supportsToolCalling: true,
      maxContextLength: 1000000,
    ),
    'gemini-2.5-flash-preview-05-20': ModelCapabilityConfig(
      supportsReasoning: true,
      supportsVision: true,
      supportsToolCalling: true,
      maxContextLength: 1000000,
    ),
    'text-embedding-004': ModelCapabilityConfig(
      supportsReasoning: false,
      supportsVision: false,
      supportsToolCalling: false,
      maxContextLength: 2048,
    ),
  },
);
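
For reference, a minimal sketch of reading this config at runtime. It assumes the static gemini field shown above is in scope (the exact access path and import depend on the enclosing class and package, which are not shown here); only fields that appear in the implementation are used.

void main() {
  // Assumption: the static `gemini` field documented above is accessible here.
  final config = gemini;

  // Defaults applied when no explicit base URL or model is supplied.
  print('Base URL: ${config.defaultBaseUrl}');    // .../v1beta/openai/
  print('Default model: ${config.defaultModel}'); // gemini-2.0-flash

  // Per-model capability lookup, e.g. whether a model supports reasoning.
  final preview = config.modelConfigs['gemini-2.5-flash-preview-05-20'];
  print('Reasoning: ${preview?.supportsReasoning}');    // true
  print('Max context: ${preview?.maxContextLength}');   // 1000000
}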