createModel method

Future<InferenceModel> createModel({
  required ModelType modelType,
  ModelFileType fileType = ModelFileType.task,
  int maxTokens = 1024,
  PreferredBackend? preferredBackend,
  List<int>? loraRanks,
  int? maxNumImages,
  bool supportImage = false,
})

override
Creates and returns a new InferenceModel instance.

modelType — model type to create.
fileType — model file format to load (defaults to ModelFileType.task).
maxTokens — maximum context length for the model.
preferredBackend — backend preference (e.g., CPU, GPU).
loraRanks — optional supported LoRA ranks.
maxNumImages — maximum number of images (for multimodal models).
supportImage — whether the model supports images.
Implementation
/// Creates and returns a new [InferenceModel] instance.
///
/// Returns the already-initialized model when one exists; otherwise builds
/// a fresh [WebInferenceModel] and caches it until its `onClose` callback
/// clears the cache. NOTE(review): [preferredBackend] is accepted for
/// interface compatibility but is not forwarded to the web model — confirm
/// this is intentional for the web platform.
@override
Future<InferenceModel> createModel({
  required ModelType modelType,
  ModelFileType fileType = ModelFileType.task,
  int maxTokens = 1024,
  PreferredBackend? preferredBackend,
  List<int>? loraRanks,
  int? maxNumImages,
  bool supportImage = false,
}) {
  // TODO: Implement multimodal support for web
  // Image-related options are accepted but currently ignored on web;
  // surface a debug-only warning so callers notice during development.
  final imageRequested = supportImage || maxNumImages != null;
  if (imageRequested && kDebugMode) {
    print('Warning: Image support is not yet implemented for web platform');
  }

  // Reuse the cached model if one is alive; otherwise create and cache it.
  var model = _initializedModel;
  if (model == null) {
    model = WebInferenceModel(
      modelType: modelType,
      fileType: fileType,
      maxTokens: maxTokens,
      loraRanks: loraRanks,
      modelManager: modelManager,
      supportImage: supportImage,
      maxNumImages: maxNumImages,
      // Closing the model releases the cache so a new one can be created.
      onClose: () {
        _initializedModel = null;
      },
    );
    _initializedModel = model;
  }

  // No awaiting needed — wrap the synchronously-created model in a Future.
  return Future.value(model);
}