getActiveModel static method

Future<InferenceModel> getActiveModel({
  int maxTokens = 1024,
  PreferredBackend? preferredBackend,
  bool supportImage = false,
  int? maxNumImages,
})

Get the active inference model as a ready-to-use InferenceModel

Returns an InferenceModel configured with runtime parameters. The model type and file type come from the active InferenceModelSpec.

Runtime parameters:

  • maxTokens: Maximum context size (default: 1024)
  • preferredBackend: CPU or GPU preference (optional)
  • supportImage: Enable multimodal image support (default: false)
  • maxNumImages: Maximum number of images if supportImage is true

Throws:

  • StateError: if no active inference model has been set (use FlutterGemma.installModel() first)
  • StateError: if the active model is not an InferenceModelSpec

Example:

// Install model first
await FlutterGemma.installModel(
  modelType: ModelType.gemmaIt,
).fromNetwork('https://example.com/model.task').install();

// Create with short context
final shortModel = await FlutterGemma.getActiveModel(
  maxTokens: 512,
);

// Create with long context and GPU
final longModel = await FlutterGemma.getActiveModel(
  maxTokens: 4096,
  preferredBackend: PreferredBackend.gpu,
);

Implementation

/// Returns a ready-to-use [InferenceModel] built from the active
/// [InferenceModelSpec].
///
/// The model identity (model type and file type) comes from the active
/// spec; [maxTokens], [preferredBackend], [supportImage] and
/// [maxNumImages] are runtime parameters layered on top of it.
///
/// Throws a [StateError] if no active inference model has been set, or if
/// the active model is not an [InferenceModelSpec].
static Future<InferenceModel> getActiveModel({
  int maxTokens = 1024,
  PreferredBackend? preferredBackend,
  bool supportImage = false,
  int? maxNumImages,
}) async {
  final plugin = FlutterGemmaPlugin.instance;
  final spec = plugin.modelManager.activeInferenceModel;

  // A model must be installed before it can be materialized.
  if (spec == null) {
    throw StateError(
      'No active inference model set. Use FlutterGemma.installModel() first.',
    );
  }

  // Only inference specs carry the identity fields read below; after this
  // guard, `spec` is promoted to InferenceModelSpec.
  if (spec is! InferenceModelSpec) {
    throw StateError(
      'Active model is not an InferenceModelSpec. '
      'Expected InferenceModelSpec, got ${spec.runtimeType}',
    );
  }

  // Identity from the spec, runtime configuration from the arguments.
  return plugin.createModel(
    modelType: spec.modelType,
    fileType: spec.fileType,
    maxTokens: maxTokens,
    preferredBackend: preferredBackend,
    supportImage: supportImage,
    maxNumImages: maxNumImages,
  );
}